# transformers/tokenization_utils_base.py
# coding=utf-8 | |
# Copyright 2020 The HuggingFace Inc. team. | |
# | |
# Licensed under the Apache License, Version 2.0 (the "License"); | |
# you may not use this file except in compliance with the License. | |
# You may obtain a copy of the License at | |
# | |
# http://www.apache.org/licenses/LICENSE-2.0 | |
# | |
# Unless required by applicable law or agreed to in writing, software | |
# distributed under the License is distributed on an "AS IS" BASIS, | |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
# See the License for the specific language governing permissions and | |
# limitations under the License. | |
""" | |
Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user | |
fronting encoding methods) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionary | |
of output with special method for the Fast tokenizers) | |
""" | |
import copy | |
import json | |
import os | |
import re | |
import warnings | |
from collections import UserDict | |
from collections.abc import Mapping, Sized | |
from contextlib import contextmanager | |
from dataclasses import dataclass | |
from functools import lru_cache | |
from inspect import isfunction | |
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union | |
import numpy as np | |
from packaging import version | |
from . import __version__ | |
from .dynamic_module_utils import custom_object_save | |
from .utils import ( | |
ExplicitEnum, | |
PaddingStrategy, | |
PushToHubMixin, | |
TensorType, | |
add_end_docstrings, | |
add_model_info_to_auto_map, | |
add_model_info_to_custom_pipelines, | |
cached_file, | |
copy_func, | |
download_url, | |
extract_commit_hash, | |
get_json_schema, | |
is_flax_available, | |
is_jax_tensor, | |
is_mlx_available, | |
is_numpy_array, | |
is_offline_mode, | |
is_remote_url, | |
is_tf_available, | |
is_tf_tensor, | |
is_tokenizers_available, | |
is_torch_available, | |
is_torch_device, | |
is_torch_tensor, | |
logging, | |
requires_backends, | |
to_py_obj, | |
) | |
if TYPE_CHECKING: | |
if is_torch_available(): | |
import torch | |
if is_tf_available(): | |
import tensorflow as tf | |
if is_flax_available(): | |
import jax.numpy as jnp # noqa: F401 | |
if is_tokenizers_available(): | |
from tokenizers import AddedToken | |
from tokenizers import Encoding as EncodingFast | |
else: | |
class AddedToken: | |
""" | |
        AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
        way it should behave.
        The `normalized` option will default to `not special` if it is not specified, similarly to the definition in
        `tokenizers`.
""" | |
def __init__( | |
self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None | |
): | |
self.content = content | |
self.single_word = single_word | |
self.lstrip = lstrip | |
self.rstrip = rstrip | |
self.special = special | |
self.normalized = normalized if normalized is not None else not special | |
def __getstate__(self): | |
return self.__dict__ | |
def __str__(self): | |
return self.content | |
class EncodingFast: | |
"""This is dummy class because without the `tokenizers` library we don't have these objects anyway""" | |
pass | |
logger = logging.get_logger(__name__) | |
VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input | |
LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER | |
# Define type aliases and NamedTuples | |
TextInput = str | |
PreTokenizedInput = List[str] | |
EncodedInput = List[int] | |
TextInputPair = Tuple[str, str] | |
PreTokenizedInputPair = Tuple[List[str], List[str]] | |
EncodedInputPair = Tuple[List[int], List[int]] | |
# Define type aliases for text-related non-text modalities | |
AudioInput = Union["np.ndarray", "torch.Tensor", List["np.ndarray"], List["torch.Tensor"]] | |
# Slow tokenizers used to be saved in three separated files | |
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" | |
ADDED_TOKENS_FILE = "added_tokens.json" | |
TOKENIZER_CONFIG_FILE = "tokenizer_config.json" | |
# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file | |
FULL_TOKENIZER_FILE = "tokenizer.json" | |
_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json") | |
class TruncationStrategy(ExplicitEnum): | |
""" | |
Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in | |
an IDE. | |
""" | |
ONLY_FIRST = "only_first" | |
ONLY_SECOND = "only_second" | |
LONGEST_FIRST = "longest_first" | |
DO_NOT_TRUNCATE = "do_not_truncate" | |
class CharSpan(NamedTuple): | |
""" | |
Character span in the original string. | |
Args: | |
start (`int`): Index of the first character in the original string. | |
end (`int`): Index of the character following the last character in the original string. | |
""" | |
start: int | |
end: int | |
class TokenSpan(NamedTuple): | |
""" | |
Token span in an encoded string (list of tokens). | |
Args: | |
start (`int`): Index of the first token in the span. | |
end (`int`): Index of the token following the last token in the span. | |
""" | |
start: int | |
end: int | |
class BatchEncoding(UserDict): | |
""" | |
Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`], | |
[`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and | |
[`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc). | |
This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes | |
utility methods to map from word/character space to token space. | |
Args: | |
data (`dict`, *optional*): | |
Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods | |
('input_ids', 'attention_mask', etc.). | |
encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*): | |
            If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character
            space to token space, the `tokenizers.Encoding` instance or list of instances (for batches) holds this
            information.
tensor_type (`Union[None, str, TensorType]`, *optional*): | |
You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at | |
initialization. | |
prepend_batch_axis (`bool`, *optional*, defaults to `False`): | |
Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). | |
        n_sequences (`Optional[int]`, *optional*):
            The number of sequences used to generate each sample from the batch (`1` for a single sentence, `2` for a
            pair of sentences).
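    Example (a minimal sketch of typical usage, assuming the `google-bert/bert-base-uncased` checkpoint can be
    loaded):
    ```python
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
    batch = tokenizer(["Hello world", "How are you?"], padding=True)
    # A BatchEncoding behaves like a dict of lists/arrays/tensors
    print(batch["input_ids"])
    print(list(batch.keys()))
    ```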
""" | |
def __init__( | |
self, | |
data: Optional[Dict[str, Any]] = None, | |
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None, | |
tensor_type: Union[None, str, TensorType] = None, | |
prepend_batch_axis: bool = False, | |
n_sequences: Optional[int] = None, | |
): | |
super().__init__(data) | |
if isinstance(encoding, EncodingFast): | |
encoding = [encoding] | |
self._encodings = encoding | |
if n_sequences is None and encoding is not None and len(encoding): | |
n_sequences = encoding[0].n_sequences | |
self._n_sequences = n_sequences | |
self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis) | |
    @property
    def n_sequences(self) -> Optional[int]:
""" | |
`Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this | |
[`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of | |
sentences) | |
""" | |
return self._n_sequences | |
    @property
    def is_fast(self) -> bool:
""" | |
`bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`] | |
or not. | |
""" | |
return self._encodings is not None | |
def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]: | |
""" | |
If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', | |
etc.). | |
If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`. | |
If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.) | |
with the constraint of slice. | |
""" | |
if isinstance(item, str): | |
return self.data[item] | |
elif self._encodings is not None: | |
return self._encodings[item] | |
elif isinstance(item, slice): | |
return {key: self.data[key][item] for key in self.data.keys()} | |
else: | |
raise KeyError( | |
"Invalid key. Only three types of key are available: " | |
"(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting." | |
) | |
def __getattr__(self, item: str): | |
try: | |
return self.data[item] | |
except KeyError: | |
raise AttributeError | |
def __getstate__(self): | |
return {"data": self.data, "encodings": self._encodings} | |
def __setstate__(self, state): | |
if "data" in state: | |
self.data = state["data"] | |
if "encodings" in state: | |
self._encodings = state["encodings"] | |
def keys(self): | |
return self.data.keys() | |
def values(self): | |
return self.data.values() | |
def items(self): | |
return self.data.items() | |
# After this point: | |
# Extended properties and methods only available for fast (Rust-based) tokenizers | |
# provided by HuggingFace tokenizers library. | |
    @property
    def encodings(self) -> Optional[List[EncodingFast]]:
""" | |
        `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns `None`
        if the input was tokenized through a Python (i.e., not fast) tokenizer.
""" | |
return self._encodings | |
def tokens(self, batch_index: int = 0) -> List[str]: | |
""" | |
Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to | |
integer indices) at a given batch index (only works for the output of a fast tokenizer). | |
Args: | |
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. | |
Returns: | |
`List[str]`: The list of tokens at that index. | |
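        Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` fast tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        encoding = tokenizer("Hello world")
        # For BERT this typically prints ['[CLS]', 'hello', 'world', '[SEP]']
        print(encoding.tokens())
        ```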
""" | |
if not self._encodings: | |
raise ValueError( | |
"tokens() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" | |
" class)." | |
) | |
return self._encodings[batch_index].tokens | |
def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]: | |
""" | |
Return a list mapping the tokens to the id of their original sentences: | |
- `None` for special tokens added around or between sequences, | |
- `0` for tokens corresponding to words in the first sequence, | |
- `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly | |
encoded. | |
Args: | |
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. | |
Returns: | |
`List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added | |
by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding | |
sequence. | |
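        Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` fast tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        encoding = tokenizer("Hello world", "How are you?")
        # Special tokens map to None, first-sequence tokens to 0, second-sequence tokens to 1
        print(encoding.sequence_ids())
        ```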
""" | |
if not self._encodings: | |
raise ValueError( | |
"sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" | |
" class)." | |
) | |
return self._encodings[batch_index].sequence_ids | |
def words(self, batch_index: int = 0) -> List[Optional[int]]: | |
""" | |
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. | |
Args: | |
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. | |
Returns: | |
`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the | |
tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word | |
(several tokens will be mapped to the same word index if they are parts of that word). | |
""" | |
if not self._encodings: | |
raise ValueError( | |
"words() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" | |
" class)." | |
) | |
warnings.warn( | |
"`BatchEncoding.words()` property is deprecated and should be replaced with the identical, " | |
"but more self-explanatory `BatchEncoding.word_ids()` property.", | |
FutureWarning, | |
) | |
return self.word_ids(batch_index) | |
def word_ids(self, batch_index: int = 0) -> List[Optional[int]]: | |
""" | |
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. | |
Args: | |
batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. | |
Returns: | |
`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the | |
tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word | |
(several tokens will be mapped to the same word index if they are parts of that word). | |
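        Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` fast tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        encoding = tokenizer("Tokenizers are great")
        # Sub-word tokens of the same word share a word index; special tokens map to None
        print(encoding.word_ids())
        ```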
""" | |
if not self._encodings: | |
raise ValueError( | |
"word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" | |
" class)." | |
) | |
return self._encodings[batch_index].word_ids | |
def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: | |
""" | |
Get the index of the sequence represented by the given token. In the general use case, this method returns `0` | |
for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair | |
Can be called as: | |
- `self.token_to_sequence(token_index)` if batch size is 1 | |
- `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1 | |
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., | |
words are defined by the user). In this case it allows to easily associate encoded tokens with provided | |
tokenized words. | |
Args: | |
batch_or_token_index (`int`): | |
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of | |
the token in the sequence. | |
token_index (`int`, *optional*): | |
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the | |
sequence. | |
Returns: | |
`int`: Index of the word in the input sequence. | |
""" | |
if not self._encodings: | |
raise ValueError("token_to_sequence() is not available when using Python based tokenizers") | |
if token_index is not None: | |
batch_index = batch_or_token_index | |
else: | |
batch_index = 0 | |
token_index = batch_or_token_index | |
if batch_index < 0: | |
batch_index = self._batch_size + batch_index | |
if token_index < 0: | |
token_index = self._seq_len + token_index | |
return self._encodings[batch_index].token_to_sequence(token_index) | |
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: | |
""" | |
        Get the index of the word corresponding to (i.e. comprising) an encoded token in a sequence of the batch.
Can be called as: | |
- `self.token_to_word(token_index)` if batch size is 1 | |
- `self.token_to_word(batch_index, token_index)` if batch size is greater than 1 | |
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., | |
words are defined by the user). In this case it allows to easily associate encoded tokens with provided | |
tokenized words. | |
Args: | |
batch_or_token_index (`int`): | |
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence. | |
token_index (`int`, *optional*): | |
If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the | |
sequence. | |
Returns: | |
`int`: Index of the word in the input sequence. | |
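        Example (a minimal sketch using a pre-tokenized input, assuming the `google-bert/bert-base-uncased` fast
        tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        encoding = tokenizer(["Hello", "world", "!"], is_split_into_words=True)
        # Batch size is 1, so a single argument is interpreted as the token index
        print(encoding.token_to_word(1))
        ```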
""" | |
if not self._encodings: | |
raise ValueError("token_to_word() is not available when using Python based tokenizers") | |
if token_index is not None: | |
batch_index = batch_or_token_index | |
else: | |
batch_index = 0 | |
token_index = batch_or_token_index | |
if batch_index < 0: | |
batch_index = self._batch_size + batch_index | |
if token_index < 0: | |
token_index = self._seq_len + token_index | |
return self._encodings[batch_index].token_to_word(token_index) | |
def word_to_tokens( | |
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 | |
) -> Optional[TokenSpan]: | |
""" | |
Get the encoded token span corresponding to a word in a sequence of the batch. | |
Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with: | |
- **start** -- Index of the first token. | |
- **end** -- Index of the token following the last token. | |
Can be called as: | |
- `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1 | |
- `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to | |
1 | |
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words | |
are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized | |
words. | |
Args: | |
batch_or_word_index (`int`): | |
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of | |
the word in the sequence. | |
            word_index (`int`, *optional*):
                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the
                pair (0 or 1) the provided word index belongs to.
Returns: | |
([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns | |
`None` if no tokens correspond to the word. This can happen especially when the token is a special token | |
that has been used to format the tokenization. For example when we add a class token at the very beginning | |
of the tokenization. | |
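        Example (a minimal sketch using a pre-tokenized input, assuming the `google-bert/bert-base-uncased` fast
        tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        encoding = tokenizer(["Tokenizers", "are", "great"], is_split_into_words=True)
        # TokenSpan covering the sub-word tokens of the first word, or None if no token maps to it
        print(encoding.word_to_tokens(0))
        ```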
""" | |
if not self._encodings: | |
raise ValueError("word_to_tokens() is not available when using Python based tokenizers") | |
if word_index is not None: | |
batch_index = batch_or_word_index | |
else: | |
batch_index = 0 | |
word_index = batch_or_word_index | |
if batch_index < 0: | |
batch_index = self._batch_size + batch_index | |
if word_index < 0: | |
word_index = self._seq_len + word_index | |
span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index) | |
return TokenSpan(*span) if span is not None else None | |
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan: | |
""" | |
Get the character span corresponding to an encoded token in a sequence of the batch. | |
Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with: | |
- **start** -- Index of the first character in the original string associated to the token. | |
- **end** -- Index of the character following the last character in the original string associated to the | |
token. | |
Can be called as: | |
- `self.token_to_chars(token_index)` if batch size is 1 | |
- `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1 | |
Args: | |
batch_or_token_index (`int`): | |
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
the token in the sequence. | |
token_index (`int`, *optional*): | |
If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in | |
the sequence. | |
Returns: | |
[`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token | |
            (e.g. `<s>`, `</s>`) doesn't correspond to any chars in the original string.
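        Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` fast tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        text = "Hello world"
        encoding = tokenizer(text)
        span = encoding.token_to_chars(1)  # character span of the first non-special token
        if span is not None:
            print(text[span.start : span.end])
        ```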
""" | |
if not self._encodings: | |
raise ValueError("token_to_chars() is not available when using Python based tokenizers") | |
if token_index is not None: | |
batch_index = batch_or_token_index | |
else: | |
batch_index = 0 | |
token_index = batch_or_token_index | |
span_indices = self._encodings[batch_index].token_to_chars(token_index) | |
return CharSpan(*span_indices) if span_indices is not None else None | |
def char_to_token( | |
self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0 | |
) -> int: | |
""" | |
Get the index of the token in the encoded output comprising a character in the original string for a sequence | |
of the batch. | |
Can be called as: | |
- `self.char_to_token(char_index)` if batch size is 1 | |
- `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1 | |
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words | |
are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized | |
words. | |
Args: | |
            batch_or_char_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index
                of the character in the original string.
            char_index (`int`, *optional*):
                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
                original string.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the
                pair (0 or 1) the provided character index belongs to.
Returns: | |
`int`: Index of the token. | |
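        Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` fast tokenizer can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        encoding = tokenizer("Hello world")
        # Index of the token covering the character at position 6 ("w"), or None if it is not covered
        print(encoding.char_to_token(6))
        ```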
""" | |
if not self._encodings: | |
raise ValueError("char_to_token() is not available when using Python based tokenizers") | |
if char_index is not None: | |
batch_index = batch_or_char_index | |
else: | |
batch_index = 0 | |
char_index = batch_or_char_index | |
return self._encodings[batch_index].char_to_token(char_index, sequence_index) | |
def word_to_chars( | |
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 | |
) -> CharSpan: | |
""" | |
        Get the character span in the original string corresponding to a given word in a sequence of the batch.
Character spans are returned as a CharSpan NamedTuple with: | |
- start: index of the first character in the original string | |
- end: index of the character following the last character in the original string | |
Can be called as: | |
- `self.word_to_chars(word_index)` if batch size is 1 | |
- `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1 | |
Args: | |
            batch_or_word_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index
                of the word in the sequence.
            word_index (`int`, *optional*):
                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the
                pair (0 or 1) the provided word index belongs to.
Returns: | |
`CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan | |
are NamedTuple with: | |
- start: index of the first character associated to the token in the original string | |
- end: index of the character following the last character associated to the token in the original | |
string | |
""" | |
if not self._encodings: | |
raise ValueError("word_to_chars() is not available when using Python based tokenizers") | |
if word_index is not None: | |
batch_index = batch_or_word_index | |
else: | |
batch_index = 0 | |
word_index = batch_or_word_index | |
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index))) | |
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int: | |
""" | |
Get the word in the original string corresponding to a character in the original string of a sequence of the | |
batch. | |
Can be called as: | |
- `self.char_to_word(char_index)` if batch size is 1 | |
- `self.char_to_word(batch_index, char_index)` if batch size is greater than 1 | |
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words | |
are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized | |
words. | |
Args: | |
            batch_or_char_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index
                of the character in the original string.
            char_index (`int`, *optional*):
                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
                original string.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the
                pair (0 or 1) the provided character index belongs to.
Returns: | |
`int` or `List[int]`: Index or indices of the associated encoded token(s). | |
""" | |
if not self._encodings: | |
raise ValueError("char_to_word() is not available when using Python based tokenizers") | |
if char_index is not None: | |
batch_index = batch_or_char_index | |
else: | |
batch_index = 0 | |
char_index = batch_or_char_index | |
return self._encodings[batch_index].char_to_word(char_index, sequence_index) | |
def convert_to_tensors( | |
self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False | |
): | |
""" | |
Convert the inner content to tensors. | |
Args: | |
tensor_type (`str` or [`~utils.TensorType`], *optional*): | |
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If | |
`None`, no modification is done. | |
            prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add the batch dimension during the conversion. | |
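        Example (a minimal sketch, assuming PyTorch is installed and the `google-bert/bert-base-uncased` tokenizer
        can be loaded):
        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        batch = tokenizer(["Hello world", "How are you?"], padding=True)
        batch.convert_to_tensors("pt")  # lists of ints become torch.Tensor objects, in place
        print(type(batch["input_ids"]))
        ```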
""" | |
if tensor_type is None: | |
return self | |
# Convert to TensorType | |
if not isinstance(tensor_type, TensorType): | |
tensor_type = TensorType(tensor_type) | |
# Get a function reference for the correct framework | |
if tensor_type == TensorType.TENSORFLOW: | |
if not is_tf_available(): | |
raise ImportError( | |
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." | |
) | |
import tensorflow as tf | |
as_tensor = tf.constant | |
is_tensor = tf.is_tensor | |
elif tensor_type == TensorType.PYTORCH: | |
if not is_torch_available(): | |
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") | |
import torch | |
is_tensor = torch.is_tensor | |
def as_tensor(value, dtype=None): | |
if isinstance(value, list) and isinstance(value[0], np.ndarray): | |
return torch.tensor(np.array(value)) | |
return torch.tensor(value) | |
elif tensor_type == TensorType.JAX: | |
if not is_flax_available(): | |
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") | |
import jax.numpy as jnp # noqa: F811 | |
as_tensor = jnp.array | |
is_tensor = is_jax_tensor | |
elif tensor_type == TensorType.MLX: | |
if not is_mlx_available(): | |
raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.") | |
import mlx.core as mx | |
as_tensor = mx.array | |
def is_tensor(obj): | |
return isinstance(obj, mx.array) | |
else: | |
def as_tensor(value, dtype=None): | |
if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): | |
value_lens = [len(val) for val in value] | |
if len(set(value_lens)) > 1 and dtype is None: | |
# we have a ragged list so handle explicitly | |
value = as_tensor([np.asarray(val) for val in value], dtype=object) | |
return np.asarray(value, dtype=dtype) | |
is_tensor = is_numpy_array | |
# Do the tensor conversion in batch | |
for key, value in self.items(): | |
try: | |
if prepend_batch_axis: | |
value = [value] | |
if not is_tensor(value): | |
tensor = as_tensor(value) | |
# Removing this for now in favor of controlling the shape with `prepend_batch_axis` | |
# # at-least2d | |
# if tensor.ndim > 2: | |
# tensor = tensor.squeeze(0) | |
# elif tensor.ndim < 2: | |
# tensor = tensor[None, :] | |
self[key] = tensor | |
except Exception as e: | |
if key == "overflowing_tokens": | |
raise ValueError( | |
"Unable to create tensor returning overflowing tokens of different lengths. " | |
"Please see if a fast version of this tokenizer is available to have this feature available." | |
) from e | |
raise ValueError( | |
"Unable to create tensor, you should probably activate truncation and/or padding with" | |
" 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your" | |
f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is" | |
" expected)." | |
) from e | |
return self | |
def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding": | |
""" | |
Send all values to device by calling `v.to(device)` (PyTorch only). | |
Args: | |
device (`str` or `torch.device`): The device to put the tensors on. | |
Returns: | |
[`BatchEncoding`]: The same instance after modification. | |
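        Example (a minimal sketch, assuming PyTorch is installed; the CUDA device is only used if present):
        ```python
        import torch
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        batch = tokenizer("Hello world", return_tensors="pt")
        if torch.cuda.is_available():
            batch = batch.to("cuda")  # moves every tensor held by the BatchEncoding to the GPU
        ```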
""" | |
requires_backends(self, ["torch"]) | |
# This check catches things like APEX blindly calling "to" on all inputs to a module | |
# Otherwise it passes the casts down and casts the LongTensor containing the token idxs | |
# into a HalfTensor | |
if isinstance(device, str) or is_torch_device(device) or isinstance(device, int): | |
self.data = {k: v.to(device=device) for k, v in self.data.items() if v is not None} | |
else: | |
logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.") | |
return self | |
class SpecialTokensMixin: | |
""" | |
    A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
    special tokens. In particular, this class holds the attributes which can be used to directly access these special
    tokens in a model-independent manner and allows setting and updating the special tokens.
Args: | |
bos_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing the beginning of a sentence. | |
eos_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing the end of a sentence. | |
unk_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing an out-of-vocabulary token. | |
sep_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token separating two different sentences in the same input (used by BERT for instance). | |
pad_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by | |
attention mechanisms or loss computation. | |
cls_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing the class of the input (used by BERT for instance). | |
mask_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing a masked token (used by masked-language modeling pretraining objectives, like | |
BERT). | |
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): | |
A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be | |
skipped when decoding if `skip_special_tokens` is set to `True`. | |
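    Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` tokenizer can be loaded):
    ```python
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
    # The mixin exposes special tokens and their ids as attributes
    print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token_id)
    print(tokenizer.all_special_tokens)
    ```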
""" | |
SPECIAL_TOKENS_ATTRIBUTES = [ | |
"bos_token", | |
"eos_token", | |
"unk_token", | |
"sep_token", | |
"pad_token", | |
"cls_token", | |
"mask_token", | |
"additional_special_tokens", | |
] | |
def __init__(self, verbose=False, **kwargs): | |
self._bos_token = None | |
self._eos_token = None | |
self._unk_token = None | |
self._sep_token = None | |
self._pad_token = None | |
self._cls_token = None | |
self._mask_token = None | |
self._pad_token_type_id = 0 | |
self._additional_special_tokens = [] | |
self.verbose = verbose | |
# We directly set the hidden value to allow initialization with special tokens | |
# which are not yet in the vocabulary. Necessary for serialization/de-serialization | |
# TODO clean this up at some point (probably by switching to fast tokenizers) | |
for key, value in kwargs.items(): | |
if value is None: | |
continue | |
if key in self.SPECIAL_TOKENS_ATTRIBUTES: | |
if key == "additional_special_tokens": | |
assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple" | |
assert all( | |
isinstance(t, (str, AddedToken)) for t in value | |
), "One of the tokens is not a string or an AddedToken" | |
setattr(self, key, value) | |
elif isinstance(value, (str, AddedToken)): | |
setattr(self, key, value) | |
else: | |
raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}") | |
def sanitize_special_tokens(self) -> int: | |
""" | |
        The `sanitize_special_tokens` method is now deprecated, kept for backward compatibility, and will be removed
        in transformers v5.
""" | |
logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.") | |
return self.add_tokens(self.all_special_tokens_extended, special_tokens=True) | |
def add_special_tokens( | |
self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True | |
) -> int: | |
""" | |
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If | |
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the | |
current vocabulary). | |
When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the | |
model so that its embedding matrix matches the tokenizer. | |
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. | |
Using `add_special_tokens` will ensure your special tokens can be used in several ways: | |
- Special tokens can be skipped when decoding using `skip_special_tokens = True`. | |
- Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`. | |
- You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This | |
makes it easy to develop model-agnostic training and fine-tuning scripts. | |
        When possible, special tokens are already registered for provided pretrained models (for instance
        [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
        `'</s>'`).
Args: | |
special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`): | |
Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`, | |
`sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`]. | |
                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
                assigns the index of the `unk_token` to them).
            replace_additional_special_tokens (`bool`, *optional*, defaults to `True`):
If `True`, the existing list of additional special tokens will be replaced by the list provided in | |
`special_tokens_dict`. Otherwise, `self._additional_special_tokens` is just extended. In the former | |
case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged | |
as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the | |
`added_tokens_encoder` and `added_tokens_decoder`. This means that the previous | |
`additional_special_tokens` are still added tokens, and will not be split by the model. | |
Returns: | |
`int`: Number of tokens added to the vocabulary. | |
Examples: | |
```python | |
# Let's see how to add a new classification token to GPT-2 | |
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") | |
model = GPT2Model.from_pretrained("openai-community/gpt2") | |
special_tokens_dict = {"cls_token": "<CLS>"} | |
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) | |
print("We have added", num_added_toks, "tokens") | |
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. | |
model.resize_token_embeddings(len(tokenizer)) | |
assert tokenizer.cls_token == "<CLS>" | |
```""" | |
if not special_tokens_dict: | |
return 0 | |
added_tokens = [] | |
for key, value in special_tokens_dict.items(): | |
assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token" | |
if self.verbose: | |
logger.info(f"Assigning {value} to the {key} key of the tokenizer") | |
if key == "additional_special_tokens": | |
assert isinstance(value, (list, tuple)) and all( | |
isinstance(t, (str, AddedToken)) for t in value | |
), f"Tokens {value} for key {key} should all be str or AddedToken instances" | |
to_add = [] | |
for token in value: | |
if isinstance(token, str): | |
# for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this | |
token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True) | |
if not replace_additional_special_tokens and str(token) in self.additional_special_tokens: | |
continue | |
to_add.append(token) | |
if replace_additional_special_tokens and len(to_add) > 0: | |
setattr(self, key, list(to_add)) | |
else: | |
self._additional_special_tokens.extend(to_add) | |
added_tokens += to_add | |
else: | |
if not isinstance(value, (str, AddedToken)): | |
raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance") | |
if isinstance(value, (str)): | |
                    # for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this
value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True) | |
if isinstance(value, AddedToken): | |
setattr(self, key, value) | |
if value not in added_tokens: | |
added_tokens.append(value) | |
# if we are adding tokens that were not part of the vocab, we ought to add them | |
added_tokens = self.add_tokens(added_tokens, special_tokens=True) | |
return added_tokens | |
def add_tokens( | |
self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False | |
) -> int: | |
""" | |
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added
        to it with indices starting from the length of the current vocabulary and will be isolated before the
        tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm
        are therefore not treated in the same way.
Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix | |
of the model so that its embedding matrix matches the tokenizer. | |
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. | |
Args: | |
new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`): | |
Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string | |
token to let you personalize its behavior: whether this token should only match against a single word, | |
whether this token should strip all potential whitespaces on the left side, whether this token should | |
strip all potential whitespaces on the right side, etc. | |
special_tokens (`bool`, *optional*, defaults to `False`): | |
                Can be used to specify if the token is a special token. This mostly changes the normalization behavior
(special tokens like CLS or [MASK] are usually not lower-cased for instance). | |
See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. | |
Returns: | |
`int`: Number of tokens added to the vocabulary. | |
Examples: | |
```python | |
# Let's see how to increase the vocabulary of Bert model and tokenizer | |
tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") | |
model = BertModel.from_pretrained("google-bert/bert-base-uncased") | |
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) | |
print("We have added", num_added_toks, "tokens") | |
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. | |
model.resize_token_embeddings(len(tokenizer)) | |
```""" | |
if not new_tokens: | |
return 0 | |
if not isinstance(new_tokens, (list, tuple)): | |
new_tokens = [new_tokens] | |
return self._add_tokens(new_tokens, special_tokens=special_tokens) | |
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: | |
raise NotImplementedError | |
    @property
    def bos_token(self) -> str:
""" | |
`str`: Beginning of sentence token. Log an error if used while not having been set. | |
""" | |
if self._bos_token is None: | |
if self.verbose: | |
logger.error("Using bos_token, but it is not set yet.") | |
return None | |
return str(self._bos_token) | |
    @property
    def eos_token(self) -> str:
""" | |
`str`: End of sentence token. Log an error if used while not having been set. | |
""" | |
if self._eos_token is None: | |
if self.verbose: | |
logger.error("Using eos_token, but it is not set yet.") | |
return None | |
return str(self._eos_token) | |
    @property
    def unk_token(self) -> str:
""" | |
`str`: Unknown token. Log an error if used while not having been set. | |
""" | |
if self._unk_token is None: | |
if self.verbose: | |
logger.error("Using unk_token, but it is not set yet.") | |
return None | |
return str(self._unk_token) | |
    @property
    def sep_token(self) -> str:
""" | |
`str`: Separation token, to separate context and query in an input sequence. Log an error if used while not | |
having been set. | |
""" | |
if self._sep_token is None: | |
if self.verbose: | |
logger.error("Using sep_token, but it is not set yet.") | |
return None | |
return str(self._sep_token) | |
    @property
    def pad_token(self) -> str:
""" | |
`str`: Padding token. Log an error if used while not having been set. | |
""" | |
if self._pad_token is None: | |
if self.verbose: | |
logger.error("Using pad_token, but it is not set yet.") | |
return None | |
return str(self._pad_token) | |
    @property
    def cls_token(self) -> str:
""" | |
`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full | |
depth of the model. Log an error if used while not having been set. | |
""" | |
if self._cls_token is None: | |
if self.verbose: | |
logger.error("Using cls_token, but it is not set yet.") | |
return None | |
return str(self._cls_token) | |
    @property
    def mask_token(self) -> str:
""" | |
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not | |
having been set. | |
""" | |
if self._mask_token is None: | |
if self.verbose: | |
logger.error("Using mask_token, but it is not set yet.") | |
return None | |
return str(self._mask_token) | |
    @property
    def additional_special_tokens(self) -> List[str]:
""" | |
`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been | |
set. | |
""" | |
if self._additional_special_tokens is None: | |
if self.verbose: | |
logger.error("Using additional_special_tokens, but it is not set yet.") | |
return None | |
return [str(tok) for tok in self._additional_special_tokens] | |
    @bos_token.setter
    def bos_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the BOS token")
        self._bos_token = value
    @eos_token.setter
    def eos_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the EOS token")
        self._eos_token = value
    @unk_token.setter
    def unk_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the UNK token")
        self._unk_token = value
    @sep_token.setter
    def sep_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the SEP token")
        self._sep_token = value
    @pad_token.setter
    def pad_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the PAD token")
        self._pad_token = value
    @cls_token.setter
    def cls_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the CLS token")
        self._cls_token = value
    @mask_token.setter
    def mask_token(self, value):
        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError("Cannot set a non-string value as the MASK token")
        self._mask_token = value
    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value if value is not None else None
    @property
    def bos_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not | |
been set. | |
""" | |
if self._bos_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.bos_token) | |
    @property
    def eos_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been | |
set. | |
""" | |
if self._eos_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.eos_token) | |
    @property
    def unk_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set. | |
""" | |
if self._unk_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.unk_token) | |
    @property
    def sep_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input | |
sequence. Returns `None` if the token has not been set. | |
""" | |
if self._sep_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.sep_token) | |
    @property
    def pad_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set. | |
""" | |
if self._pad_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.pad_token) | |
    @property
    def pad_token_type_id(self) -> int:
""" | |
`int`: Id of the padding token type in the vocabulary. | |
""" | |
return self._pad_token_type_id | |
    @property
    def cls_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence | |
leveraging self-attention along the full depth of the model. | |
Returns `None` if the token has not been set. | |
""" | |
if self._cls_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.cls_token) | |
    @property
    def mask_token_id(self) -> Optional[int]:
""" | |
`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language | |
modeling. Returns `None` if the token has not been set. | |
""" | |
if self._mask_token is None: | |
return None | |
return self.convert_tokens_to_ids(self.mask_token) | |
    @property
    def additional_special_tokens_ids(self) -> List[int]:
""" | |
`List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having | |
been set. | |
""" | |
return self.convert_tokens_to_ids(self.additional_special_tokens) | |
    @bos_token_id.setter
    def bos_token_id(self, value):
        self._bos_token = self.convert_ids_to_tokens(value) if value is not None else None
    @eos_token_id.setter
    def eos_token_id(self, value):
        self._eos_token = self.convert_ids_to_tokens(value) if value is not None else None
    @unk_token_id.setter
    def unk_token_id(self, value):
        self._unk_token = self.convert_ids_to_tokens(value) if value is not None else None
    @sep_token_id.setter
    def sep_token_id(self, value):
        self._sep_token = self.convert_ids_to_tokens(value) if value is not None else None
    @pad_token_id.setter
    def pad_token_id(self, value):
        self._pad_token = self.convert_ids_to_tokens(value) if value is not None else None
    @cls_token_id.setter
    def cls_token_id(self, value):
        self._cls_token = self.convert_ids_to_tokens(value) if value is not None else None
    @mask_token_id.setter
    def mask_token_id(self, value):
        self._mask_token = self.convert_ids_to_tokens(value) if value is not None else None
    @additional_special_tokens_ids.setter
    def additional_special_tokens_ids(self, values):
        self._additional_special_tokens = [self.convert_ids_to_tokens(value) for value in values]
    @property
    def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
""" | |
`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`, | |
`unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). | |
Convert potential tokens of `tokenizers.AddedToken` type to string. | |
""" | |
set_attr = {} | |
for attr in self.SPECIAL_TOKENS_ATTRIBUTES: | |
attr_value = getattr(self, attr) | |
if attr_value: | |
set_attr[attr] = attr_value | |
return set_attr | |
    @property
    def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
""" | |
`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping | |
special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). | |
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how | |
special tokens are tokenized. | |
""" | |
set_attr = {} | |
for attr in self.SPECIAL_TOKENS_ATTRIBUTES: | |
attr_value = getattr(self, "_" + attr) | |
if attr_value: | |
set_attr[attr] = attr_value | |
return set_attr | |
    @property
    def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
""" | |
        `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), whose order
        has nothing to do with the index of each token. If you want to know the correct indices, check
        `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedToken` objects, not strings.
Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how | |
special tokens are tokenized. | |
""" | |
all_tokens = [] | |
seen = set() | |
for value in self.special_tokens_map_extended.values(): | |
if isinstance(value, (list, tuple)): | |
tokens_to_add = [token for token in value if str(token) not in seen] | |
else: | |
tokens_to_add = [value] if str(value) not in seen else [] | |
seen.update(map(str, tokens_to_add)) | |
all_tokens.extend(tokens_to_add) | |
return all_tokens | |
    @property
    def all_special_tokens(self) -> List[str]:
""" | |
`List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). | |
Convert tokens of `tokenizers.AddedToken` type to string. | |
""" | |
all_toks = [str(s) for s in self.all_special_tokens_extended] | |
return all_toks | |
    @property
    def all_special_ids(self) -> List[int]:
""" | |
        `List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
""" | |
all_toks = self.all_special_tokens | |
all_ids = self.convert_tokens_to_ids(all_toks) | |
return all_ids | |
ENCODE_KWARGS_DOCSTRING = r""" | |
add_special_tokens (`bool`, *optional*, defaults to `True`): | |
Whether or not to add special tokens when encoding the sequences. This will use the underlying | |
`PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are | |
            automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens
automatically. | |
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): | |
Activates and controls padding. Accepts the following values: | |
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum | |
acceptable input length for the model if that argument is not provided. | |
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different | |
lengths). | |
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): | |
Activates and controls truncation. Accepts the following values: | |
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or | |
to the maximum acceptable input length for the model if that argument is not provided. This will | |
truncate token by token, removing a token from the longest sequence in the pair if a pair of | |
sequences (or a batch of pairs) is provided. | |
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will only | |
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. | |
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will only | |
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. | |
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths | |
greater than the model maximum admissible input size). | |
max_length (`int`, *optional*): | |
Controls the maximum length to use by one of the truncation/padding parameters. | |
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length | |
is required by one of the truncation/padding parameters. If the model has no specific maximum input | |
length (like XLNet) truncation/padding to a maximum length will be deactivated. | |
stride (`int`, *optional*, defaults to 0): | |
If set to a number along with `max_length`, the overflowing tokens returned when | |
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence | |
returned to provide some overlap between truncated and overflowing sequences. The value of this | |
argument defines the number of overlapping tokens. | |
is_split_into_words (`bool`, *optional*, defaults to `False`): | |
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the | |
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) | |
which it will tokenize. This is useful for NER or token classification. | |
pad_to_multiple_of (`int`, *optional*): | |
If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. | |
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability | |
`>= 7.5` (Volta). | |
return_tensors (`str` or [`~utils.TensorType`], *optional*): | |
If set, will return tensors instead of list of python integers. Acceptable values are: | |
- `'tf'`: Return TensorFlow `tf.constant` objects. | |
- `'pt'`: Return PyTorch `torch.Tensor` objects. | |
- `'np'`: Return Numpy `np.ndarray` objects. | |
""" | |
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" | |
return_token_type_ids (`bool`, *optional*): | |
Whether to return token type IDs. If left to the default, will return the token type IDs according to | |
the specific tokenizer's default, defined by the `return_outputs` attribute. | |
[What are token type IDs?](../glossary#token-type-ids) | |
return_attention_mask (`bool`, *optional*): | |
Whether to return the attention mask. If left to the default, will return the attention mask according | |
to the specific tokenizer's default, defined by the `return_outputs` attribute. | |
[What are attention masks?](../glossary#attention-mask) | |
return_overflowing_tokens (`bool`, *optional*, defaults to `False`): | |
Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch | |
of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead | |
of returning overflowing tokens. | |
return_special_tokens_mask (`bool`, *optional*, defaults to `False`): | |
Whether or not to return special tokens mask information. | |
return_offsets_mapping (`bool`, *optional*, defaults to `False`): | |
Whether or not to return `(char_start, char_end)` for each token. | |
This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using | |
Python's tokenizer, this method will raise `NotImplementedError`. | |
return_length (`bool`, *optional*, defaults to `False`): | |
Whether or not to return the lengths of the encoded inputs. | |
verbose (`bool`, *optional*, defaults to `True`): | |
Whether or not to print more information and warnings. | |
**kwargs: passed to the `self.tokenize()` method | |
Return: | |
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields: | |
- **input_ids** -- List of token ids to be fed to a model. | |
[What are input IDs?](../glossary#input-ids) | |
- **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or | |
if *"token_type_ids"* is in `self.model_input_names`). | |
[What are token type IDs?](../glossary#token-type-ids) | |
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when | |
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). | |
[What are attention masks?](../glossary#attention-mask) | |
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and | |
`return_overflowing_tokens=True`). | |
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and | |
`return_overflowing_tokens=True`). | |
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying | |
regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). | |
- **length** -- The length of the inputs (when `return_length=True`) | |
""" | |
INIT_TOKENIZER_DOCSTRING = r""" | |
Class attributes (overridden by derived classes) | |
- **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each | |
vocabulary file required by the model, and as associated values, the filename for saving the associated file | |
(string). | |
- **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the | |
high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the | |
low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the | |
associated pretrained vocabulary file. | |
- **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model. | |
- **padding_side** (`str`) -- The default value for the side on which the model should have padding applied. | |
Should be `'right'` or `'left'`. | |
- **truncation_side** (`str`) -- The default value for the side on which the model should have truncation | |
applied. Should be `'right'` or `'left'`. | |
Args: | |
model_max_length (`int`, *optional*): | |
The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is | |
loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the | |
value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will | |
default to VERY_LARGE_INTEGER (`int(1e30)`). | |
padding_side (`str`, *optional*): | |
The side on which the model should have padding applied. Should be selected between ['right', 'left']. | |
Default value is picked from the class attribute of the same name. | |
truncation_side (`str`, *optional*): | |
The side on which the model should have truncation applied. Should be selected between ['right', 'left']. | |
Default value is picked from the class attribute of the same name. | |
chat_template (`str`, *optional*): | |
A Jinja template string that will be used to format lists of chat messages. See | |
https://huggingface.co/docs/transformers/chat_templating for a full description. | |
model_input_names (`List[string]`, *optional*): | |
The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or | |
`"attention_mask"`). Default value is picked from the class attribute of the same name. | |
bos_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and | |
`self.bos_token_id`. | |
eos_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing the end of a sentence. Will be associated to `self.eos_token` and | |
`self.eos_token_id`. | |
unk_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and | |
`self.unk_token_id`. | |
sep_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token separating two different sentences in the same input (used by BERT for instance). Will be | |
associated to `self.sep_token` and `self.sep_token_id`. | |
pad_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`. | |
cls_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing the class of the input (used by BERT for instance). Will be associated to | |
`self.cls_token` and `self.cls_token_id`. | |
mask_token (`str` or `tokenizers.AddedToken`, *optional*): | |
A special token representing a masked token (used by masked-language modeling pretraining objectives, like | |
BERT). Will be associated to `self.mask_token` and `self.mask_token_id`. | |
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): | |
A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding if
`skip_special_tokens` is set to `True`. If they are not part of the vocabulary, they will be added at the end
of the vocabulary. | |
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): | |
Whether or not the model should cleanup the spaces that were added when splitting the input text during the | |
tokenization process. | |
split_special_tokens (`bool`, *optional*, defaults to `False`): | |
Whether or not the special tokens should be split during the tokenization process. Passing `True` will affect the
internal state of the tokenizer. The default behavior is to not split special tokens. This means that if
`<s>` is the `bos_token`, then `tokenizer.tokenize("<s>") == ['<s>']`. Otherwise, if
`split_special_tokens=True`, then `tokenizer.tokenize("<s>")` will give `['<', 's', '>']`.
""" | |
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin): | |
""" | |
Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`]. | |
Handles shared (mostly boiler plate) methods for those two classes. | |
""" | |
vocab_files_names: Dict[str, str] = {} | |
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {} | |
_auto_class: Optional[str] = None | |
# first name has to correspond to main model input name | |
# to make sure `tokenizer.pad(...)` works correctly | |
model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"] | |
padding_side: str = "right" | |
truncation_side: str = "right" | |
slow_tokenizer_class = None | |
def __init__(self, **kwargs): | |
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) | |
self.init_inputs = () | |
for key in kwargs: | |
if hasattr(self, key) and callable(getattr(self, key)): | |
raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}") | |
self.init_kwargs = copy.deepcopy(kwargs) | |
self.name_or_path = kwargs.pop("name_or_path", "") | |
self._processor_class = kwargs.pop("processor_class", None) | |
# For backward compatibility we fallback to set model_max_length from max_len if provided | |
model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None)) | |
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER | |
# Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it | |
# is changed. | |
self.padding_side = kwargs.pop("padding_side", self.padding_side) | |
if self.padding_side not in ["right", "left"]: | |
raise ValueError( | |
f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}" | |
) | |
self.truncation_side = kwargs.pop("truncation_side", self.truncation_side) | |
if self.truncation_side not in ["right", "left"]: | |
raise ValueError( | |
f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}" | |
) | |
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names) | |
if "clean_up_tokenization_spaces" not in kwargs: | |
warnings.warn( | |
"`clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This " | |
"behavior will be depracted in transformers v4.45, and will be then set to `False` by default. " | |
"For more details check this issue: https://github.com/huggingface/transformers/issues/31884", | |
FutureWarning, | |
) | |
# By default, cleaning tokenization spaces for both fast and slow tokenizers | |
self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", True) | |
# By default, do not split special tokens for both fast and slow tokenizers | |
self.split_special_tokens = kwargs.pop("split_special_tokens", False) | |
self.deprecation_warnings = {}  # Used to store whether we have already emitted a given deprecation warning (avoids over-logging).
self._in_target_context_manager = False | |
# Stores a Jinja template that formats chat histories into tokenizable strings | |
self.chat_template = kwargs.pop("chat_template", None) | |
if isinstance(self.chat_template, (list, tuple)): | |
# Chat templates are stored as lists of dicts with fixed key names, | |
# we reconstruct that into a single dict while loading them. | |
self.chat_template = {template["name"]: template["template"] for template in self.chat_template} | |
super().__init__(**kwargs) | |
@property
def max_len_single_sentence(self) -> int:
""" | |
`int`: The maximum length of a sentence that can be fed to the model. | |
""" | |
return self.model_max_length - self.num_special_tokens_to_add(pair=False) | |
@property
def max_len_sentences_pair(self) -> int:
""" | |
`int`: The maximum combined length of a pair of sentences that can be fed to the model. | |
""" | |
return self.model_max_length - self.num_special_tokens_to_add(pair=True) | |
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
# For backward compatibility, allow to try to setup 'max_len_single_sentence'. | |
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose: | |
if not self.deprecation_warnings.get("max_len_single_sentence", False): | |
logger.warning( | |
"Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." | |
) | |
self.deprecation_warnings["max_len_single_sentence"] = True | |
else: | |
raise ValueError( | |
"Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." | |
) | |
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
# For backward compatibility, allow to try to setup 'max_len_sentences_pair'. | |
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose: | |
if not self.deprecation_warnings.get("max_len_sentences_pair", False): | |
logger.warning( | |
"Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up." | |
) | |
self.deprecation_warnings["max_len_sentences_pair"] = True | |
else: | |
raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.") | |
def _set_processor_class(self, processor_class: str): | |
"""Sets processor class as an attribute.""" | |
self._processor_class = processor_class | |
@property
def added_tokens_decoder(self) -> Dict[int, AddedToken]:
raise NotImplementedError() | |
def __repr__(self) -> str: | |
added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()]) | |
return ( | |
f"{self.__class__.__name__}(name_or_path='{self.name_or_path}'," | |
f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast}," | |
f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}'," | |
f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}), " | |
" added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}" | |
) | |
def __len__(self) -> int: | |
raise NotImplementedError() | |
def get_vocab(self) -> Dict[str, int]: | |
""" | |
Returns the vocabulary as a dictionary of token to index. | |
`tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the | |
vocab. | |
Returns: | |
`Dict[str, int]`: The vocabulary. | |
""" | |
raise NotImplementedError() | |
def apply_chat_template( | |
self, | |
conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]]], | |
tools: Optional[List[Dict]] = None, | |
documents: Optional[List[Dict[str, str]]] = None, | |
chat_template: Optional[str] = None, | |
add_generation_prompt: bool = False, | |
tokenize: bool = True, | |
padding: bool = False, | |
truncation: bool = False, | |
max_length: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_dict: bool = False, | |
return_assistant_tokens_mask: bool = False, | |
tokenizer_kwargs: Optional[Dict[str, Any]] = None, | |
**kwargs, | |
) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]: | |
""" | |
Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token | |
ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to | |
determine the format and control tokens to use when converting. | |
Args: | |
conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts | |
with "role" and "content" keys, representing the chat history so far. | |
tools (`List[Dict]`, *optional*): | |
A list of tools (callable functions) that will be accessible to the model. If the template does not | |
support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, | |
giving the name, description and argument types for the tool. See our | |
[chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) | |
for more information. | |
documents (`List[Dict[str, str]]`, *optional*): | |
A list of dicts representing documents that will be accessible to the model if it is performing RAG | |
(retrieval-augmented generation). If the template does not support RAG, this argument will have no | |
effect. We recommend that each document should be a dict containing "title" and "text" keys. Please | |
see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG) | |
for examples of passing documents with chat templates. | |
chat_template (`str`, *optional*): | |
A Jinja template to use for this conversion. It is usually not necessary to pass anything to this | |
argument, as the model's template will be used by default. | |
add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate | |
the start of an assistant message. This is useful when you want to generate a response from the model. | |
Note that this argument will be passed to the chat template, and so it must be supported in the | |
template for this argument to have any effect. | |
tokenize (`bool`, defaults to `True`): | |
Whether to tokenize the output. If `False`, the output will be a string. | |
padding (`bool`, defaults to `False`): | |
Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. | |
truncation (`bool`, defaults to `False`): | |
Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. | |
max_length (`int`, *optional*): | |
Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If | |
not specified, the tokenizer's `max_length` attribute will be used as a default. | |
return_tensors (`str` or [`~utils.TensorType`], *optional*): | |
If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable | |
values are: | |
- `'tf'`: Return TensorFlow `tf.Tensor` objects. | |
- `'pt'`: Return PyTorch `torch.Tensor` objects. | |
- `'np'`: Return NumPy `np.ndarray` objects. | |
- `'jax'`: Return JAX `jnp.ndarray` objects. | |
return_dict (`bool`, defaults to `False`): | |
Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. | |
tokenizer_kwargs (`Dict[str, Any]`, *optional*): Additional kwargs to pass to the tokenizer.
return_assistant_tokens_mask (`bool`, defaults to `False`): | |
Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant, | |
the mask will contain 1. For user and system tokens, the mask will contain 0. | |
This functionality is only available for chat templates that support it via the `{% generation %}` keyword. | |
**kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template. | |
Returns: | |
`Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This | |
output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is | |
set, will return a dict of tokenizer outputs instead. | |
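Examples (an illustrative sketch only; the checkpoint name is an assumption and must ship a chat template):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
chat = [
    {"role": "user", "content": "Hello, how are you?"},
    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
    {"role": "user", "content": "Tell me a joke."},
]
# Render the chat as a single prompt string ending with the assistant prefix token(s)
prompt = tokenizer.apply_chat_template(chat, add_generation_prompt=True, tokenize=False)
# Or tokenize directly; the returned ids are ready to pass to `generate()`
input_ids = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt")
```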
""" | |
if return_dict and not tokenize: | |
raise ValueError( | |
"`return_dict=True` is incompatible with `tokenize=False`, because there is no dict " | |
"of tokenizer outputs to return." | |
) | |
if return_assistant_tokens_mask and not return_dict: | |
raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`") | |
if tokenizer_kwargs is None: | |
tokenizer_kwargs = {} | |
chat_template = self.get_chat_template(chat_template, tools) | |
if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template): | |
logger.warning_once( | |
"return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword." | |
) | |
# Compilation function uses a cache to avoid recompiling the same template | |
compiled_template = self._compile_jinja_template(chat_template) | |
if isinstance(conversation, (list, tuple)) and ( | |
isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages") | |
): | |
conversations = conversation | |
is_batched = True | |
else: | |
conversations = [conversation] | |
is_batched = False | |
# We accept either JSON schemas or functions for tools. If we get functions, we convert them to schemas | |
if tools is not None: | |
tool_schemas = [] | |
for tool in tools: | |
if isinstance(tool, dict): | |
tool_schemas.append(tool) | |
elif isfunction(tool): | |
tool_schemas.append(get_json_schema(tool)) | |
else: | |
raise ValueError( | |
"Tools should either be a JSON schema, or a callable function with type hints " | |
"and a docstring suitable for auto-conversion to a schema." | |
) | |
else: | |
tool_schemas = None | |
if documents is not None: | |
for document in documents: | |
if not isinstance(document, dict): | |
raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!") | |
rendered = [] | |
all_generation_indices = [] | |
template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present | |
for chat in conversations: | |
if hasattr(chat, "messages"): | |
# Indicates it's a Conversation object | |
chat = chat.messages | |
if return_assistant_tokens_mask: | |
rendered_chat, generation_indices = self._render_with_assistant_indices( | |
compiled_template=compiled_template, | |
messages=chat, | |
tools=tool_schemas, | |
documents=documents, | |
add_generation_prompt=add_generation_prompt, | |
**template_kwargs, | |
) | |
all_generation_indices.append(generation_indices) | |
else: | |
rendered_chat = compiled_template.render( | |
messages=chat, | |
tools=tool_schemas, | |
documents=documents, | |
add_generation_prompt=add_generation_prompt, | |
**template_kwargs, | |
) | |
rendered.append(rendered_chat) | |
if not is_batched: | |
rendered = rendered[0] | |
if tokenize: | |
out = self( | |
rendered, | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
add_special_tokens=False, | |
return_tensors=return_tensors, | |
**tokenizer_kwargs, | |
) | |
if return_dict: | |
if return_assistant_tokens_mask: | |
assistant_masks = [] | |
if is_batched or return_tensors: | |
input_ids = out["input_ids"] | |
else: | |
input_ids = [out["input_ids"]] | |
for i in range(len(input_ids)): | |
current_mask = [0] * len(input_ids[i]) | |
for assistant_start_char, assistant_end_char in all_generation_indices[i]: | |
start_token = out.char_to_token(i, assistant_start_char) | |
end_token = out.char_to_token(i, assistant_end_char - 1) | |
if start_token is None: | |
# start_token is out of bounds maybe due to truncation. | |
break | |
for token_id in range(start_token, end_token + 1 if end_token else len(input_ids)): | |
current_mask[token_id] = 1 | |
assistant_masks.append(current_mask) | |
out["assistant_masks"] = assistant_masks if is_batched else assistant_masks[0] | |
return out | |
else: | |
return out["input_ids"] | |
else: | |
return rendered | |
def _render_with_assistant_indices( | |
self, compiled_template, messages, tools, documents, add_generation_prompt, **template_kwargs | |
): | |
rendered_blocks = [] | |
generation_indices = [] | |
with compiled_template.environment.activate_tracker(rendered_blocks, generation_indices): | |
for block in compiled_template.generate( | |
messages=messages, | |
tools=tools, | |
documents=documents, | |
add_generation_prompt=add_generation_prompt, | |
**template_kwargs, | |
): | |
rendered_blocks.append(block) | |
rendered_chat = "".join(rendered_blocks) | |
return rendered_chat, generation_indices | |
@lru_cache
def _compile_jinja_template(self, chat_template):
try: | |
import jinja2 | |
from jinja2 import nodes | |
from jinja2.exceptions import TemplateError | |
from jinja2.ext import Extension | |
from jinja2.sandbox import ImmutableSandboxedEnvironment | |
except ImportError: | |
raise ImportError("apply_chat_template requires jinja2 to be installed.") | |
if version.parse(jinja2.__version__) < version.parse("3.1.0"): | |
raise ImportError( | |
"apply_chat_template requires jinja2>=3.1.0 to be installed. Your version is " f"{jinja2.__version__}." | |
) | |
def raise_exception(message): | |
raise TemplateError(message) | |
def tojson(x, ensure_ascii=False, indent=None, separators=None, sort_keys=False): | |
# We override the built-in tojson filter because Jinja's default filter escapes HTML characters | |
# We also expose some options like custom indents and separators | |
return json.dumps(x, ensure_ascii=ensure_ascii, indent=indent, separators=separators, sort_keys=sort_keys) | |
class AssistantTracker(Extension): | |
# This extension is used to track the indices of assistant-generated tokens in the rendered chat | |
tags = {"generation"} | |
def __init__(self, environment: ImmutableSandboxedEnvironment): | |
# The class is only instantiated by Jinja.
super().__init__(environment) | |
environment.extend(activate_tracker=self.activate_tracker) | |
self._rendered_blocks = None | |
self._generation_indices = None | |
def parse(self, parser: jinja2.parser.Parser) -> jinja2.nodes.CallBlock: | |
lineno = next(parser.stream).lineno | |
body = parser.parse_statements(["name:endgeneration"], drop_needle=True) | |
return nodes.CallBlock(self.call_method("_generation_support"), [], [], body).set_lineno(lineno) | |
@jinja2.pass_eval_context
def _generation_support(self, context: jinja2.nodes.EvalContext, caller: jinja2.runtime.Macro) -> str:
rv = caller() | |
if self.is_active(): | |
# Only track generation indices if the tracker is active | |
start_index = len("".join(self._rendered_blocks)) | |
end_index = start_index + len(rv) | |
self._generation_indices.append((start_index, end_index)) | |
return rv | |
def is_active(self) -> bool: | |
return self._rendered_blocks or self._generation_indices | |
@contextmanager
def activate_tracker(self, rendered_blocks: List[int], generation_indices: List[int]):
try: | |
if self.is_active(): | |
raise ValueError("AssistantTracker should not be reused before closed") | |
self._rendered_blocks = rendered_blocks | |
self._generation_indices = generation_indices | |
yield | |
finally: | |
self._rendered_blocks = None | |
self._generation_indices = None | |
jinja_env = ImmutableSandboxedEnvironment(trim_blocks=True, lstrip_blocks=True, extensions=[AssistantTracker]) | |
jinja_env.filters["tojson"] = tojson | |
jinja_env.globals["raise_exception"] = raise_exception | |
return jinja_env.from_string(chat_template) | |
def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[List[Dict]] = None) -> str: | |
""" | |
Retrieve the chat template string used for tokenizing chat messages. This template is used | |
internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat | |
template for better generation tracking. | |
Args: | |
chat_template (`str`, *optional*): | |
A Jinja template or the name of a template to use for this conversion. | |
It is usually not necessary to pass anything to this argument, | |
as the model's template will be used by default. | |
tools (`List[Dict]`, *optional*): | |
A list of tools (callable functions) that will be accessible to the model. If the template does not | |
support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, | |
giving the name, description and argument types for the tool. See our | |
[chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) | |
for more information. | |
Returns: | |
`str`: The chat template string. | |
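Examples (an illustrative sketch only; assumes the tokenizer stores a dict of named templates):
```python
# Default template (or the "tool_use" template when tools are passed)
template = tokenizer.get_chat_template()
# A specific named template, e.g. a hypothetical "rag" entry
rag_template = tokenizer.get_chat_template("rag")
```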
""" | |
# First, handle the cases when the model has a dict of multiple templates | |
if isinstance(self.chat_template, dict): | |
template_dict = self.chat_template | |
if chat_template is not None and chat_template in template_dict: | |
# The user can pass the name of a template to the chat template argument instead of an entire template | |
chat_template = template_dict[chat_template] | |
elif chat_template is None: | |
if tools is not None and "tool_use" in template_dict: | |
chat_template = template_dict["tool_use"] | |
elif "default" in template_dict: | |
chat_template = template_dict["default"] | |
else: | |
raise ValueError( | |
"This model has multiple chat templates with no default specified! Please either pass a chat " | |
"template or the name of the template you wish to use to the `chat_template` argument. Available " | |
f"template names are {sorted(template_dict.keys())}." | |
) | |
elif chat_template is None: | |
# These are the cases when the model has a single template | |
# priority: `chat_template` argument > `tokenizer.chat_template` | |
if self.chat_template is not None: | |
chat_template = self.chat_template | |
else: | |
raise ValueError( | |
"Cannot use apply_chat_template() because tokenizer.chat_template is not set and no template " | |
"argument was passed! For information about writing templates and setting the " | |
"tokenizer.chat_template attribute, please see the documentation at " | |
"https://huggingface.co/docs/transformers/main/en/chat_templating" | |
) | |
return chat_template | |
@classmethod
def from_pretrained(
cls, | |
pretrained_model_name_or_path: Union[str, os.PathLike], | |
*init_inputs, | |
cache_dir: Optional[Union[str, os.PathLike]] = None, | |
force_download: bool = False, | |
local_files_only: bool = False, | |
token: Optional[Union[str, bool]] = None, | |
revision: str = "main", | |
trust_remote_code=False, | |
**kwargs, | |
): | |
r""" | |
Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined | |
tokenizer. | |
Args: | |
pretrained_model_name_or_path (`str` or `os.PathLike`): | |
Can be either: | |
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. | |
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved | |
using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g., | |
`./my_model_directory/`. | |
- (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary | |
file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., | |
`./my_model_directory/vocab.txt`. | |
cache_dir (`str` or `os.PathLike`, *optional*): | |
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the | |
standard cache should not be used. | |
force_download (`bool`, *optional*, defaults to `False`): | |
Whether or not to force the (re-)download the vocabulary files and override the cached versions if they | |
exist. | |
resume_download: | |
Deprecated and ignored. All downloads are now resumed by default when possible. | |
Will be removed in v5 of Transformers. | |
proxies (`Dict[str, str]`, *optional*): | |
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', | |
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. | |
token (`str` or *bool*, *optional*): | |
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated | |
when running `huggingface-cli login` (stored in `~/.huggingface`). | |
local_files_only (`bool`, *optional*, defaults to `False`): | |
Whether or not to only rely on local files and not to attempt to download any files. | |
revision (`str`, *optional*, defaults to `"main"`): | |
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a | |
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any | |
identifier allowed by git. | |
subfolder (`str`, *optional*): | |
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for | |
facebook/rag-token-base), specify it here. | |
inputs (additional positional arguments, *optional*): | |
Will be passed along to the Tokenizer `__init__` method. | |
trust_remote_code (`bool`, *optional*, defaults to `False`): | |
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option | |
should only be set to `True` for repositories you trust and in which you have read the code, as it will | |
execute code present on the Hub on your local machine. | |
kwargs (additional keyword arguments, *optional*): | |
Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`, | |
`eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, | |
`additional_special_tokens`. See parameters in the `__init__` for more details. | |
<Tip> | |
Passing `token=True` is required when you want to use a private model. | |
</Tip> | |
Examples: | |
```python | |
# We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer | |
# Download vocabulary from huggingface.co and cache. | |
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") | |
# Download vocabulary from huggingface.co (user-uploaded) and cache. | |
tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") | |
# If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) | |
tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") | |
# If the tokenizer uses a single vocabulary file, you can point directly to this file | |
tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt") | |
# You can link tokens to special vocabulary when instantiating | |
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>") | |
# You should be sure '<unk>' is in the vocabulary when doing that. | |
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
assert tokenizer.unk_token == "<unk>" | |
```""" | |
resume_download = kwargs.pop("resume_download", None) | |
proxies = kwargs.pop("proxies", None) | |
use_auth_token = kwargs.pop("use_auth_token", None) | |
subfolder = kwargs.pop("subfolder", None) | |
from_pipeline = kwargs.pop("_from_pipeline", None) | |
from_auto_class = kwargs.pop("_from_auto", False) | |
commit_hash = kwargs.pop("_commit_hash", None) | |
gguf_file = kwargs.get("gguf_file", None) | |
if use_auth_token is not None: | |
warnings.warn( | |
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", | |
FutureWarning, | |
) | |
if token is not None: | |
raise ValueError( | |
"`token` and `use_auth_token` are both specified. Please set only the argument `token`." | |
) | |
token = use_auth_token | |
user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__} | |
if from_pipeline is not None: | |
user_agent["using_pipeline"] = from_pipeline | |
if is_offline_mode() and not local_files_only: | |
logger.info("Offline mode: forcing local_files_only=True") | |
local_files_only = True | |
pretrained_model_name_or_path = str(pretrained_model_name_or_path) | |
vocab_files = {} | |
init_configuration = {} | |
is_local = os.path.isdir(pretrained_model_name_or_path) | |
single_file_id = None | |
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): | |
if len(cls.vocab_files_names) > 1 and not gguf_file: | |
raise ValueError( | |
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " | |
"supported for this tokenizer. Use a model identifier or the path to a directory instead." | |
) | |
warnings.warn( | |
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and " | |
"won't be possible anymore in v5. Use a model identifier or the path to a directory instead.", | |
FutureWarning, | |
) | |
file_id = list(cls.vocab_files_names.keys())[0] | |
vocab_files[file_id] = pretrained_model_name_or_path | |
single_file_id = file_id | |
else: | |
if gguf_file: | |
vocab_files["vocab_file"] = gguf_file | |
else: | |
# At this point pretrained_model_name_or_path is either a directory or a model identifier name | |
additional_files_names = { | |
"added_tokens_file": ADDED_TOKENS_FILE, # kept only for legacy | |
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, # kept only for legacy | |
"tokenizer_config_file": TOKENIZER_CONFIG_FILE, | |
# tokenizer_file used to initialize a slow from a fast. Properly copy the `addedTokens` instead of adding in random orders | |
"tokenizer_file": FULL_TOKENIZER_FILE, | |
} | |
vocab_files = {**cls.vocab_files_names, **additional_files_names} | |
if "tokenizer_file" in vocab_files: | |
# Try to get the tokenizer config to see if there are versioned tokenizer files. | |
fast_tokenizer_file = FULL_TOKENIZER_FILE | |
resolved_config_file = cached_file( | |
pretrained_model_name_or_path, | |
TOKENIZER_CONFIG_FILE, | |
cache_dir=cache_dir, | |
force_download=force_download, | |
resume_download=resume_download, | |
proxies=proxies, | |
token=token, | |
revision=revision, | |
local_files_only=local_files_only, | |
subfolder=subfolder, | |
user_agent=user_agent, | |
_raise_exceptions_for_gated_repo=False, | |
_raise_exceptions_for_missing_entries=False, | |
_raise_exceptions_for_connection_errors=False, | |
_commit_hash=commit_hash, | |
) | |
commit_hash = extract_commit_hash(resolved_config_file, commit_hash) | |
if resolved_config_file is not None: | |
with open(resolved_config_file, encoding="utf-8") as reader: | |
tokenizer_config = json.load(reader) | |
if "fast_tokenizer_files" in tokenizer_config: | |
fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"]) | |
vocab_files["tokenizer_file"] = fast_tokenizer_file | |
# Get files from url, cache, or disk depending on the case | |
resolved_vocab_files = {} | |
unresolved_files = [] | |
for file_id, file_path in vocab_files.items(): | |
if file_path is None: | |
resolved_vocab_files[file_id] = None | |
elif single_file_id == file_id: | |
if os.path.isfile(file_path): | |
resolved_vocab_files[file_id] = file_path | |
elif is_remote_url(file_path): | |
resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies) | |
else: | |
resolved_vocab_files[file_id] = cached_file( | |
pretrained_model_name_or_path, | |
file_path, | |
cache_dir=cache_dir, | |
force_download=force_download, | |
proxies=proxies, | |
resume_download=resume_download, | |
local_files_only=local_files_only, | |
token=token, | |
user_agent=user_agent, | |
revision=revision, | |
subfolder=subfolder, | |
_raise_exceptions_for_gated_repo=False, | |
_raise_exceptions_for_missing_entries=False, | |
_raise_exceptions_for_connection_errors=False, | |
_commit_hash=commit_hash, | |
) | |
commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash) | |
if len(unresolved_files) > 0: | |
logger.info( | |
f"Can't load following files from cache: {unresolved_files} and cannot check if these " | |
"files are necessary for the tokenizer to operate." | |
) | |
# If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be | |
# loaded directly from the GGUF file. | |
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file: | |
raise EnvironmentError( | |
f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from " | |
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. " | |
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " | |
f"containing all relevant files for a {cls.__name__} tokenizer." | |
) | |
for file_id, file_path in vocab_files.items(): | |
if file_id not in resolved_vocab_files: | |
continue | |
if is_local: | |
logger.info(f"loading file {file_path}") | |
else: | |
logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}") | |
return cls._from_pretrained( | |
resolved_vocab_files, | |
pretrained_model_name_or_path, | |
init_configuration, | |
*init_inputs, | |
token=token, | |
cache_dir=cache_dir, | |
local_files_only=local_files_only, | |
_commit_hash=commit_hash, | |
_is_local=is_local, | |
trust_remote_code=trust_remote_code, | |
**kwargs, | |
) | |
@classmethod
def _from_pretrained(
cls, | |
resolved_vocab_files, | |
pretrained_model_name_or_path, | |
init_configuration, | |
*init_inputs, | |
token=None, | |
cache_dir=None, | |
local_files_only=False, | |
_commit_hash=None, | |
_is_local=False, | |
trust_remote_code=False, | |
**kwargs, | |
): | |
# We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json | |
# file or if `from_slow` is set to True. | |
from_slow = kwargs.get("from_slow", False) | |
gguf_file = kwargs.get("gguf_file", None) | |
has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None | |
# If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be | |
# loaded directly from the GGUF file. | |
if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file: | |
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained( | |
copy.deepcopy(resolved_vocab_files), | |
pretrained_model_name_or_path, | |
copy.deepcopy(init_configuration), | |
*init_inputs, | |
token=token, | |
cache_dir=cache_dir, | |
local_files_only=local_files_only, | |
_commit_hash=_commit_hash, | |
**(copy.deepcopy(kwargs)), | |
) | |
else: | |
slow_tokenizer = None | |
# Prepare tokenizer initialization kwargs | |
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None) | |
if tokenizer_config_file is not None: | |
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: | |
init_kwargs = json.load(tokenizer_config_handle) | |
# First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers. | |
config_tokenizer_class = init_kwargs.get("tokenizer_class") | |
init_kwargs.pop("tokenizer_class", None) | |
if not has_tokenizer_file: | |
init_kwargs.pop("tokenizer_file", None) | |
saved_init_inputs = init_kwargs.pop("init_inputs", ()) | |
if not init_inputs: | |
init_inputs = saved_init_inputs | |
else: | |
config_tokenizer_class = None | |
init_kwargs = init_configuration | |
if not _is_local: | |
if "auto_map" in init_kwargs: | |
# For backward compatibility with the old format.
if isinstance(init_kwargs["auto_map"], (tuple, list)): | |
init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]} | |
init_kwargs["auto_map"] = add_model_info_to_auto_map( | |
init_kwargs["auto_map"], pretrained_model_name_or_path | |
) | |
if "custom_pipelines" in init_kwargs: | |
init_kwargs["custom_pipelines"] = add_model_info_to_custom_pipelines( | |
init_kwargs["custom_pipelines"], pretrained_model_name_or_path | |
) | |
if config_tokenizer_class is None: | |
# Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo. | |
# If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with | |
# AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain. | |
# Maybe we can just remove this entirely? | |
from .models.auto.configuration_auto import AutoConfig # tests_ignore | |
# Second attempt. If we have not yet found tokenizer_class, let's try to use the config. | |
try: | |
config = AutoConfig.from_pretrained( | |
pretrained_model_name_or_path, | |
token=token, | |
cache_dir=cache_dir, | |
local_files_only=local_files_only, | |
trust_remote_code=trust_remote_code, | |
_commit_hash=_commit_hash, | |
) | |
config_tokenizer_class = config.tokenizer_class | |
except (OSError, ValueError, KeyError): | |
# skip if an error occurred. | |
config = None | |
if config_tokenizer_class is None: | |
# Third attempt. If we have not yet found the original type of the tokenizer we are loading,
# we see if we can infer it from the type of the configuration file.
from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore | |
if hasattr(config, "model_type"): | |
model_type = config.model_type | |
else: | |
# Fallback: use pattern matching on the string. | |
model_type = None | |
for pattern in TOKENIZER_MAPPING_NAMES.keys(): | |
if pattern in str(pretrained_model_name_or_path): | |
model_type = pattern | |
break | |
if model_type is not None: | |
config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get( | |
model_type, (None, None) | |
) | |
if config_tokenizer_class is None: | |
config_tokenizer_class = config_tokenizer_class_fast | |
if config_tokenizer_class is not None: | |
if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""): | |
logger.warning( | |
"The tokenizer class you load from this checkpoint is not the same type as the class this" | |
" function is called from. It may result in unexpected tokenization. \nThe tokenizer class you" | |
f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called" | |
f" from is '{cls.__name__}'." | |
) | |
# Update with newly provided kwargs | |
init_kwargs.update(kwargs) | |
# Merge resolved_vocab_files arguments in init_kwargs. | |
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None) | |
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None) | |
for args_name, file_path in resolved_vocab_files.items(): | |
if args_name not in init_kwargs: | |
init_kwargs[args_name] = file_path | |
tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None) | |
if slow_tokenizer is not None: | |
init_kwargs["__slow_tokenizer"] = slow_tokenizer | |
init_kwargs["name_or_path"] = pretrained_model_name_or_path | |
#### Handle tokenizer serialization of added and special tokens | |
added_tokens_decoder: Dict[int, AddedToken] = {} | |
added_tokens_map: Dict[str, AddedToken] = {} | |
# if we have info on the slow added tokens | |
if "added_tokens_decoder" in init_kwargs: | |
for idx, token in init_kwargs["added_tokens_decoder"].items(): | |
if isinstance(token, dict): | |
token = AddedToken(**token) | |
if isinstance(token, AddedToken): | |
added_tokens_decoder[int(idx)] = token | |
added_tokens_map[str(token)] = token | |
else: | |
raise ValueError( | |
f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance" | |
) | |
else: | |
# begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified | |
if special_tokens_map_file is not None: | |
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: | |
special_tokens_map = json.load(special_tokens_map_handle) | |
for key, value in special_tokens_map.items(): | |
if key in kwargs and kwargs[key]: | |
# This value has already been redefined by the kwargs | |
# We keep this new value and ignore the one stored in the special_tokens_map_file | |
continue | |
if isinstance(value, dict): | |
value["special"] = True | |
value = AddedToken(**value) | |
elif key == "additional_special_tokens" and isinstance(value, list): | |
additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or [] | |
for token in value: | |
if isinstance(token, dict): | |
token["special"] = True | |
token = AddedToken(**token) | |
if token not in additional_special_tokens: | |
additional_special_tokens.append(token) | |
value = additional_special_tokens | |
init_kwargs[key] = value | |
# slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`. | |
# this is for legacy purposes. We don't add the tokens after init for efficiency.
if added_tokens_file is not None: | |
special_tokens = [] | |
for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys(): | |
if init_kwargs[key] is not None: | |
if key == "additional_special_tokens": | |
special_tokens += [str(token) for token in init_kwargs[key]] | |
else: | |
special_tokens.append(str(init_kwargs[key])) | |
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle: | |
added_tok_encoder = json.load(added_tokens_handle) | |
for str_token, index in added_tok_encoder.items(): | |
# if index not in added_tokens_decoder and str_token not in added_tokens_map: | |
special = str_token in special_tokens | |
added_tokens_decoder[index] = AddedToken( | |
str_token, rstrip=False, lstrip=False, normalized=not special, special=special | |
) | |
added_tokens_map[str_token] = added_tokens_decoder[index]
# allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer | |
# if `tokenizer_config.json` is `None` | |
if tokenizer_file is not None: | |
# This is for the slow tokenizer so it can be done before instantiation
with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle: | |
tokenizer_file_handle = json.load(tokenizer_file_handle) | |
added_tokens = tokenizer_file_handle.pop("added_tokens") | |
for serialized_tokens in added_tokens: | |
idx = serialized_tokens.pop("id") | |
added_tokens_decoder[idx] = AddedToken(**serialized_tokens) | |
added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx] | |
# end legacy | |
# Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken | |
# convert {'__type': 'AddedToken', 'content': '<ent>', 'lstrip': False, 'normalized': True, ...} to AddedTokens | |
init_kwargs["added_tokens_decoder"] = added_tokens_decoder | |
init_kwargs = cls.convert_added_tokens(init_kwargs, save=False) | |
for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys(): | |
if added_tokens_map != {} and init_kwargs[key] is not None: | |
if key != "additional_special_tokens": | |
init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key]) | |
# Instantiate the tokenizer. | |
try: | |
tokenizer = cls(*init_inputs, **init_kwargs) | |
except OSError: | |
raise OSError( | |
"Unable to load vocabulary from file. " | |
"Please check that the provided vocabulary is accessible and not corrupted." | |
) | |
if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size: | |
logger.info( | |
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are" | |
" fine-tuned or trained." | |
) | |
return tokenizer | |
@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
# This method should be deleted in Transformers v5 | |
# Its only purpose is to potentially throw a warning | |
# that incorrectly defined max lengths of T5's tokenizer are used | |
# which we will correct in Transformers v5. | |
return max_model_length | |
@classmethod
def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True):
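# Recursively converts between `AddedToken` objects and their dict serialization
# ({"__type": "AddedToken", ...}) inside arbitrarily nested lists/dicts of kwargs.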
if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken": | |
obj.pop("__type") | |
return AddedToken(**obj) | |
if isinstance(obj, AddedToken) and save: | |
obj = obj.__getstate__() | |
if add_type_field: | |
obj["__type"] = "AddedToken" | |
else: | |
# Don't save "special" for previous tokenizers | |
obj.pop("special") | |
return obj | |
elif isinstance(obj, (list, tuple)): | |
return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj] | |
elif isinstance(obj, dict): | |
return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()} | |
return obj | |
def save_pretrained( | |
self, | |
save_directory: Union[str, os.PathLike], | |
legacy_format: Optional[bool] = None, | |
filename_prefix: Optional[str] = None, | |
push_to_hub: bool = False, | |
**kwargs, | |
) -> Tuple[str]: | |
""" | |
Save the full tokenizer state. | |
This method makes sure the full tokenizer can then be re-loaded using the
[`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (for
instance, modifying `tokenizer.do_lower_case` after creation). | |
Args: | |
save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved. | |
legacy_format (`bool`, *optional*): | |
Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON | |
format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate | |
added_tokens file.
If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with | |
"slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be | |
loaded in the corresponding "slow" tokenizer. | |
If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a value
error is raised. | |
filename_prefix (`str`, *optional*): | |
A prefix to add to the names of the files saved by the tokenizer. | |
push_to_hub (`bool`, *optional*, defaults to `False`): | |
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the | |
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your | |
namespace). | |
kwargs (`Dict[str, Any]`, *optional*): | |
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
Returns: | |
A tuple of `str`: The files saved. | |
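Examples (an illustrative sketch only; the checkpoint and directory names are assumptions):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
saved_files = tokenizer.save_pretrained("./my_tokenizer")
# The saved state can be reloaded later
reloaded = AutoTokenizer.from_pretrained("./my_tokenizer")
```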
""" | |
use_auth_token = kwargs.pop("use_auth_token", None) | |
if use_auth_token is not None: | |
warnings.warn( | |
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", | |
FutureWarning, | |
) | |
if kwargs.get("token", None) is not None: | |
raise ValueError( | |
"`token` and `use_auth_token` are both specified. Please set only the argument `token`." | |
) | |
kwargs["token"] = use_auth_token | |
if os.path.isfile(save_directory): | |
logger.error(f"Provided path ({save_directory}) should be a directory, not a file") | |
return | |
os.makedirs(save_directory, exist_ok=True) | |
if push_to_hub: | |
commit_message = kwargs.pop("commit_message", None) | |
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) | |
repo_id = self._create_repo(repo_id, **kwargs) | |
files_timestamps = self._get_files_timestamps(save_directory) | |
special_tokens_map_file = os.path.join( | |
save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE | |
) | |
tokenizer_config_file = os.path.join( | |
save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE | |
) | |
tokenizer_config = copy.deepcopy(self.init_kwargs) | |
# Let's save the init kwargs | |
target_keys = set(self.init_kwargs.keys()) | |
# Let's save the special tokens map (only the strings) | |
target_keys.update(["model_max_length", "clean_up_tokenization_spaces"]) | |
for k in target_keys: | |
if hasattr(self, k): | |
tokenizer_config[k] = getattr(self, k) | |
# Let's make sure we properly save the special tokens. | |
tokenizer_config.update(self.special_tokens_map) | |
if self.chat_template is not None: | |
if isinstance(self.chat_template, dict): | |
# Chat template dicts are saved to the config as lists of dicts with fixed key names. | |
# They will be reconstructed as a single dict during loading. | |
tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()] | |
else: | |
tokenizer_config["chat_template"] = self.chat_template | |
if len(self.init_inputs) > 0: | |
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs) | |
for file_id in self.vocab_files_names.keys(): | |
tokenizer_config.pop(file_id, None) | |
# No type fields, this way old fast and slow tokenizers can load it
tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True) | |
# Process added tokens separately: allows previous versions to ignore them!
added_tokens = {} | |
for key, value in self.added_tokens_decoder.items(): | |
added_tokens[key] = value.__getstate__() | |
tokenizer_config["added_tokens_decoder"] = added_tokens | |
# Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained | |
tokenizer_class = self.__class__.__name__ | |
# Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast` | |
if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast": | |
tokenizer_class = tokenizer_class[:-4] | |
tokenizer_config["tokenizer_class"] = tokenizer_class | |
if getattr(self, "_auto_map", None) is not None: | |
tokenizer_config["auto_map"] = self._auto_map | |
if getattr(self, "_processor_class", None) is not None: | |
tokenizer_config["processor_class"] = self._processor_class | |
# If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be | |
# loaded from the Hub. | |
if self._auto_class is not None: | |
custom_object_save(self, save_directory, config=tokenizer_config) | |
# remove private information | |
if "name_or_path" in tokenizer_config: | |
tokenizer_config.pop("name_or_path") | |
tokenizer_config.pop("special_tokens_map_file", None) | |
tokenizer_config.pop("tokenizer_file", None) | |
if "device_map" in tokenizer_config: | |
tokenizer_config.pop("device_map") | |
with open(tokenizer_config_file, "w", encoding="utf-8") as f: | |
out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n" | |
f.write(out_str) | |
logger.info(f"tokenizer config file saved in {tokenizer_config_file}") | |
# Sanitize AddedTokens in special_tokens_map | |
# kept for forward compatibility; will be removed in transformers v5. Type fields are not saved for forward compatibility, and `special` should not be saved either
write_dict = self.convert_added_tokens(self.special_tokens_map_extended, save=True, add_type_field=False) | |
with open(special_tokens_map_file, "w", encoding="utf-8") as f: | |
out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n" | |
f.write(out_str) | |
logger.info(f"Special tokens file saved in {special_tokens_map_file}") | |
file_names = (tokenizer_config_file, special_tokens_map_file) | |
save_files = self._save_pretrained( | |
save_directory=save_directory, | |
file_names=file_names, | |
legacy_format=legacy_format, | |
filename_prefix=filename_prefix, | |
) | |
if push_to_hub: | |
self._upload_modified_files( | |
save_directory, | |
repo_id, | |
files_timestamps, | |
commit_message=commit_message, | |
token=kwargs.get("token"), | |
) | |
return save_files | |
def _save_pretrained( | |
self, | |
save_directory: Union[str, os.PathLike], | |
file_names: Tuple[str], | |
legacy_format: Optional[bool] = None, | |
filename_prefix: Optional[str] = None, | |
) -> Tuple[str]: | |
""" | |
Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens. | |
Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the | |
specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`] | |
""" | |
if legacy_format is False: | |
raise ValueError( | |
"Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format." | |
) | |
save_directory = str(save_directory) | |
added_tokens_file = os.path.join( | |
save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE | |
) | |
# the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size | |
added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} | |
if added_vocab: | |
with open(added_tokens_file, "w", encoding="utf-8") as f: | |
out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" | |
f.write(out_str) | |
logger.info(f"added tokens file saved in {added_tokens_file}") | |
vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) | |
return file_names + vocab_files + (added_tokens_file,) | |
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: | |
""" | |
Save only the vocabulary of the tokenizer (vocabulary + added tokens). | |
This method won't save the configuration and special token mappings of the tokenizer. Use | |
[`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer. | |
Args: | |
save_directory (`str`): | |
The directory in which to save the vocabulary. | |
filename_prefix (`str`, *optional*): | |
An optional prefix to add to the names of the saved files.
Returns: | |
`Tuple(str)`: Paths to the files saved. | |
""" | |
raise NotImplementedError | |
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: | |
""" | |
Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`. | |
Args: | |
text (`str`): | |
The sequence to be encoded. | |
pair (`str`, *optional*): | |
A second sequence to be encoded with the first. | |
add_special_tokens (`bool`, *optional*, defaults to `False`): | |
Whether or not to add the special tokens associated with the corresponding model. | |
kwargs (additional keyword arguments, *optional*): | |
Will be passed to the underlying model specific encode method. See details in | |
[`~PreTrainedTokenizerBase.__call__`] | |
Returns: | |
`List[str]`: The list of tokens. | |
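Example (illustrative sketch; `tokenizer` is assumed to be an instantiated subclass, and the exact
sub-tokens depend on the underlying vocabulary):

    tokens = tokenizer.tokenize("Hello world!")
    # e.g. ["Hello", "Ġworld", "!"] for a byte-level BPE tokenizer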
""" | |
raise NotImplementedError | |
def encode( | |
self, | |
text: Union[TextInput, PreTokenizedInput, EncodedInput], | |
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, | |
add_special_tokens: bool = True, | |
padding: Union[bool, str, PaddingStrategy] = False, | |
truncation: Union[bool, str, TruncationStrategy] = None, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
**kwargs, | |
) -> List[int]: | |
""" | |
Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. | |
Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. | |
Args: | |
text (`str`, `List[str]` or `List[int]`): | |
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the | |
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` | |
method). | |
text_pair (`str`, `List[str]` or `List[int]`, *optional*): | |
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using | |
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` | |
method). | |
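Example (illustrative sketch; `tokenizer` is assumed to be an already-loaded subclass):

    ids = tokenizer.encode("Hello world", add_special_tokens=True)
    # `ids` is a List[int]; `tokenizer.decode(ids)` maps it back to text.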
""" | |
encoded_inputs = self.encode_plus( | |
text, | |
text_pair=text_pair, | |
add_special_tokens=add_special_tokens, | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
stride=stride, | |
return_tensors=return_tensors, | |
**kwargs, | |
) | |
return encoded_inputs["input_ids"] | |
def num_special_tokens_to_add(self, pair: bool = False) -> int: | |
raise NotImplementedError | |
def _get_padding_truncation_strategies( | |
self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs | |
): | |
""" | |
Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy | |
and pad_to_max_length) and behaviors. | |
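For instance (illustrative sketch mirroring the logic below; `tokenizer` is an instantiated subclass):

    tokenizer._get_padding_truncation_strategies(padding=True, truncation=True, max_length=128)
    # -> (PaddingStrategy.LONGEST, TruncationStrategy.LONGEST_FIRST, 128, {})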
""" | |
old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate") | |
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False) | |
# Backward compatibility for previous behavior, maybe we should deprecate it: | |
# If you only set max_length, it activates truncation for max_length | |
if max_length is not None and padding is False and truncation is None: | |
if verbose: | |
if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False): | |
logger.warning( | |
"Truncation was not explicitly activated but `max_length` is provided a specific value, please" | |
" use `truncation=True` to explicitly truncate examples to max length. Defaulting to" | |
" 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the" | |
" tokenizer you can select this strategy more precisely by providing a specific strategy to" | |
" `truncation`." | |
) | |
self.deprecation_warnings["Truncation-not-explicitly-activated"] = True | |
truncation = "longest_first" | |
# Get padding strategy | |
if padding is False and old_pad_to_max_length: | |
if verbose: | |
warnings.warn( | |
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, " | |
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or " | |
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific " | |
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the " | |
"maximal input size of the model (e.g. 512 for Bert).", | |
FutureWarning, | |
) | |
if max_length is None: | |
padding_strategy = PaddingStrategy.LONGEST | |
else: | |
padding_strategy = PaddingStrategy.MAX_LENGTH | |
elif padding is not False: | |
if padding is True: | |
if verbose: | |
if max_length is not None and ( | |
truncation is None or truncation is False or truncation == "do_not_truncate" | |
): | |
warnings.warn( | |
"`max_length` is ignored when `padding`=`True` and there is no truncation strategy. " | |
"To pad to max length, use `padding='max_length'`." | |
) | |
if old_pad_to_max_length is not False: | |
warnings.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.") | |
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch | |
elif not isinstance(padding, PaddingStrategy): | |
padding_strategy = PaddingStrategy(padding) | |
elif isinstance(padding, PaddingStrategy): | |
padding_strategy = padding | |
else: | |
padding_strategy = PaddingStrategy.DO_NOT_PAD | |
# Get truncation strategy | |
if truncation is None and old_truncation_strategy != "do_not_truncate": | |
if verbose: | |
warnings.warn( | |
"The `truncation_strategy` argument is deprecated and will be removed in a future version, use" | |
" `truncation=True` to truncate examples to a max length. You can give a specific length with" | |
" `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input" | |
" size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific" | |
" truncation strategy selected among `truncation='only_first'` (will only truncate the first" | |
" sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the" | |
" pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence" | |
" in the pairs).", | |
FutureWarning, | |
) | |
truncation_strategy = TruncationStrategy(old_truncation_strategy) | |
elif truncation is not False and truncation is not None: | |
if truncation is True: | |
truncation_strategy = ( | |
TruncationStrategy.LONGEST_FIRST | |
) # Default to truncate the longest sequences in pairs of inputs | |
elif not isinstance(truncation, TruncationStrategy): | |
truncation_strategy = TruncationStrategy(truncation) | |
elif isinstance(truncation, TruncationStrategy): | |
truncation_strategy = truncation | |
else: | |
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE | |
# Set max length if needed | |
if max_length is None: | |
if padding_strategy == PaddingStrategy.MAX_LENGTH: | |
if self.model_max_length > LARGE_INTEGER: | |
if verbose: | |
if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False): | |
logger.warning( | |
"Asking to pad to max_length but no maximum length is provided and the model has no" | |
" predefined maximum length. Default to no padding." | |
) | |
self.deprecation_warnings["Asking-to-pad-to-max_length"] = True | |
padding_strategy = PaddingStrategy.DO_NOT_PAD | |
else: | |
max_length = self.model_max_length | |
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: | |
if self.model_max_length > LARGE_INTEGER: | |
if verbose: | |
if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False): | |
logger.warning( | |
"Asking to truncate to max_length but no maximum length is provided and the model has" | |
" no predefined maximum length. Default to no truncation." | |
) | |
self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True | |
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE | |
else: | |
max_length = self.model_max_length | |
# Test if we have a padding token | |
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0): | |
raise ValueError( | |
"Asking to pad but the tokenizer does not have a padding token. " | |
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " | |
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`." | |
) | |
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided | |
if ( | |
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE | |
and padding_strategy != PaddingStrategy.DO_NOT_PAD | |
and pad_to_multiple_of is not None | |
and max_length is not None | |
and (max_length % pad_to_multiple_of != 0) | |
): | |
raise ValueError( | |
"Truncation and padding are both activated but " | |
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})." | |
) | |
return padding_strategy, truncation_strategy, max_length, kwargs | |
def __call__( | |
self, | |
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, | |
text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, | |
text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, | |
text_pair_target: Optional[ | |
Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] | |
] = None, | |
add_special_tokens: bool = True, | |
padding: Union[bool, str, PaddingStrategy] = False, | |
truncation: Union[bool, str, TruncationStrategy] = None, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
is_split_into_words: bool = False, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
**kwargs, | |
) -> BatchEncoding: | |
""" | |
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of | |
sequences. | |
Args: | |
text (`str`, `List[str]`, `List[List[str]]`, *optional*): | |
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings | |
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set | |
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences). | |
text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*): | |
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings | |
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set | |
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences). | |
text_target (`str`, `List[str]`, `List[List[str]]`, *optional*): | |
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a | |
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), | |
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). | |
text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*): | |
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a | |
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), | |
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). | |
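Example (illustrative sketch; `tokenizer` is assumed to be an instantiated subclass, the exact ids are
model dependent, and `return_tensors="pt"` assumes PyTorch is installed):

    batch = tokenizer(["Hello world", "How are you?"], padding=True, truncation=True, return_tensors="pt")
    # batch["input_ids"] and batch["attention_mask"] are padded tensors
    seq2seq = tokenizer("source text", text_target="target text")
    # seq2seq additionally contains a "labels" key built from the target text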
""" | |
# To avoid duplicating | |
all_kwargs = { | |
"add_special_tokens": add_special_tokens, | |
"padding": padding, | |
"truncation": truncation, | |
"max_length": max_length, | |
"stride": stride, | |
"is_split_into_words": is_split_into_words, | |
"pad_to_multiple_of": pad_to_multiple_of, | |
"return_tensors": return_tensors, | |
"return_token_type_ids": return_token_type_ids, | |
"return_attention_mask": return_attention_mask, | |
"return_overflowing_tokens": return_overflowing_tokens, | |
"return_special_tokens_mask": return_special_tokens_mask, | |
"return_offsets_mapping": return_offsets_mapping, | |
"return_length": return_length, | |
"split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens), | |
"verbose": verbose, | |
} | |
all_kwargs.update(kwargs) | |
if text is None and text_target is None: | |
raise ValueError("You need to specify either `text` or `text_target`.") | |
if text is not None: | |
# The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the | |
# input mode in this case. | |
if not self._in_target_context_manager: | |
self._switch_to_input_mode() | |
encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs) | |
if text_target is not None: | |
self._switch_to_target_mode() | |
target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs) | |
# Put the tokenizer back in input mode
self._switch_to_input_mode() | |
if text_target is None: | |
return encodings | |
elif text is None: | |
return target_encodings | |
else: | |
encodings["labels"] = target_encodings["input_ids"] | |
return encodings | |
def _call_one( | |
self, | |
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], | |
text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, | |
add_special_tokens: bool = True, | |
padding: Union[bool, str, PaddingStrategy] = False, | |
truncation: Union[bool, str, TruncationStrategy] = None, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
is_split_into_words: bool = False, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
split_special_tokens: bool = False, | |
**kwargs, | |
) -> BatchEncoding: | |
# Input type checking for clearer error | |
def _is_valid_text_input(t): | |
if isinstance(t, str): | |
# Strings are fine | |
return True | |
elif isinstance(t, (list, tuple)): | |
# Lists are fine as long as they are...
if len(t) == 0: | |
# ... empty | |
return True | |
elif isinstance(t[0], str): | |
# ... list of strings | |
return True | |
elif isinstance(t[0], (list, tuple)): | |
# ... list with an empty list or with a list of strings | |
return len(t[0]) == 0 or isinstance(t[0][0], str) | |
else: | |
return False | |
else: | |
return False | |
if not _is_valid_text_input(text): | |
raise ValueError( | |
"text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) " | |
"or `List[List[str]]` (batch of pretokenized examples)." | |
) | |
if text_pair is not None and not _is_valid_text_input(text_pair): | |
raise ValueError( | |
"text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) " | |
"or `List[List[str]]` (batch of pretokenized examples)." | |
) | |
if is_split_into_words: | |
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) | |
else: | |
is_batched = isinstance(text, (list, tuple)) | |
if is_batched: | |
if isinstance(text_pair, str): | |
raise TypeError( | |
"when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as" | |
" `text`." | |
) | |
if text_pair is not None and len(text) != len(text_pair): | |
raise ValueError( | |
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" | |
f" {len(text_pair)}." | |
) | |
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text | |
return self.batch_encode_plus( | |
batch_text_or_text_pairs=batch_text_or_text_pairs, | |
add_special_tokens=add_special_tokens, | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
stride=stride, | |
is_split_into_words=is_split_into_words, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_tensors=return_tensors, | |
return_token_type_ids=return_token_type_ids, | |
return_attention_mask=return_attention_mask, | |
return_overflowing_tokens=return_overflowing_tokens, | |
return_special_tokens_mask=return_special_tokens_mask, | |
return_offsets_mapping=return_offsets_mapping, | |
return_length=return_length, | |
verbose=verbose, | |
split_special_tokens=split_special_tokens, | |
**kwargs, | |
) | |
else: | |
return self.encode_plus( | |
text=text, | |
text_pair=text_pair, | |
add_special_tokens=add_special_tokens, | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
stride=stride, | |
is_split_into_words=is_split_into_words, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_tensors=return_tensors, | |
return_token_type_ids=return_token_type_ids, | |
return_attention_mask=return_attention_mask, | |
return_overflowing_tokens=return_overflowing_tokens, | |
return_special_tokens_mask=return_special_tokens_mask, | |
return_offsets_mapping=return_offsets_mapping, | |
return_length=return_length, | |
verbose=verbose, | |
split_special_tokens=split_special_tokens, | |
**kwargs, | |
) | |
def encode_plus( | |
self, | |
text: Union[TextInput, PreTokenizedInput, EncodedInput], | |
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, | |
add_special_tokens: bool = True, | |
padding: Union[bool, str, PaddingStrategy] = False, | |
truncation: Union[bool, str, TruncationStrategy] = None, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
is_split_into_words: bool = False, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
**kwargs, | |
) -> BatchEncoding: | |
""" | |
Tokenize and prepare for the model a sequence or a pair of sequences. | |
<Tip warning={true}> | |
This method is deprecated, `__call__` should be used instead. | |
</Tip> | |
Args: | |
text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`): | |
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the | |
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` | |
method). | |
text_pair (`str`, `List[str]` or `List[int]`, *optional*): | |
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using | |
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` | |
method). | |
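Example (illustrative sketch, kept for reference since this method is deprecated in favor of `__call__`):

    encoding = tokenizer.encode_plus("Hello world", add_special_tokens=True)
    # `encoding` is a `BatchEncoding` with at least "input_ids" (and usually "attention_mask")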
""" | |
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length' | |
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
pad_to_multiple_of=pad_to_multiple_of, | |
verbose=verbose, | |
**kwargs, | |
) | |
return self._encode_plus( | |
text=text, | |
text_pair=text_pair, | |
add_special_tokens=add_special_tokens, | |
padding_strategy=padding_strategy, | |
truncation_strategy=truncation_strategy, | |
max_length=max_length, | |
stride=stride, | |
is_split_into_words=is_split_into_words, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_tensors=return_tensors, | |
return_token_type_ids=return_token_type_ids, | |
return_attention_mask=return_attention_mask, | |
return_overflowing_tokens=return_overflowing_tokens, | |
return_special_tokens_mask=return_special_tokens_mask, | |
return_offsets_mapping=return_offsets_mapping, | |
return_length=return_length, | |
verbose=verbose, | |
split_special_tokens=kwargs.pop("split_special_tokens", self.split_special_tokens), | |
**kwargs, | |
) | |
def _encode_plus( | |
self, | |
text: Union[TextInput, PreTokenizedInput, EncodedInput], | |
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, | |
add_special_tokens: bool = True, | |
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, | |
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
is_split_into_words: bool = False, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
split_special_tokens: bool = False, | |
**kwargs, | |
) -> BatchEncoding: | |
raise NotImplementedError | |
def batch_encode_plus( | |
self, | |
batch_text_or_text_pairs: Union[ | |
List[TextInput], | |
List[TextInputPair], | |
List[PreTokenizedInput], | |
List[PreTokenizedInputPair], | |
List[EncodedInput], | |
List[EncodedInputPair], | |
], | |
add_special_tokens: bool = True, | |
padding: Union[bool, str, PaddingStrategy] = False, | |
truncation: Union[bool, str, TruncationStrategy] = None, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
is_split_into_words: bool = False, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
split_special_tokens: bool = False, | |
**kwargs, | |
) -> BatchEncoding: | |
""" | |
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. | |
<Tip warning={true}> | |
This method is deprecated, `__call__` should be used instead. | |
</Tip> | |
Args: | |
batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`): | |
Batch of sequences or pair of sequences to be encoded. This can be a list of | |
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see | |
details in `encode_plus`). | |
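Example (illustrative sketch; prefer calling the tokenizer directly via `__call__`):

    encodings = tokenizer.batch_encode_plus(
        [("question 1", "context 1"), ("question 2", "context 2")], padding=True
    )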
""" | |
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length' | |
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
pad_to_multiple_of=pad_to_multiple_of, | |
verbose=verbose, | |
**kwargs, | |
) | |
return self._batch_encode_plus( | |
batch_text_or_text_pairs=batch_text_or_text_pairs, | |
add_special_tokens=add_special_tokens, | |
padding_strategy=padding_strategy, | |
truncation_strategy=truncation_strategy, | |
max_length=max_length, | |
stride=stride, | |
is_split_into_words=is_split_into_words, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_tensors=return_tensors, | |
return_token_type_ids=return_token_type_ids, | |
return_attention_mask=return_attention_mask, | |
return_overflowing_tokens=return_overflowing_tokens, | |
return_special_tokens_mask=return_special_tokens_mask, | |
return_offsets_mapping=return_offsets_mapping, | |
return_length=return_length, | |
verbose=verbose, | |
split_special_tokens=split_special_tokens, | |
**kwargs, | |
) | |
def _batch_encode_plus( | |
self, | |
batch_text_or_text_pairs: Union[ | |
List[TextInput], | |
List[TextInputPair], | |
List[PreTokenizedInput], | |
List[PreTokenizedInputPair], | |
List[EncodedInput], | |
List[EncodedInputPair], | |
], | |
add_special_tokens: bool = True, | |
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, | |
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
is_split_into_words: bool = False, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
split_special_tokens: bool = False, | |
**kwargs, | |
) -> BatchEncoding: | |
raise NotImplementedError | |
def pad( | |
self, | |
encoded_inputs: Union[ | |
BatchEncoding, | |
List[BatchEncoding], | |
Dict[str, EncodedInput], | |
Dict[str, List[EncodedInput]], | |
List[Dict[str, EncodedInput]], | |
], | |
padding: Union[bool, str, PaddingStrategy] = True, | |
max_length: Optional[int] = None, | |
pad_to_multiple_of: Optional[int] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
verbose: bool = True, | |
) -> BatchEncoding: | |
""" | |
Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length | |
in the batch. | |
Padding side (left/right) and padding token ids are defined at the tokenizer level (with `self.padding_side`,
`self.pad_token_id` and `self.pad_token_type_id`). | |
Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the | |
text followed by a call to the `pad` method to get a padded encoding. | |
<Tip> | |
If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the | |
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of | |
PyTorch tensors, you will lose the specific device of your tensors however. | |
</Tip> | |
Args: | |
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of | |
tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str, | |
List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader | |
collate function. | |
Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see | |
the note above for the return type. | |
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): | |
Select a strategy to pad the returned sequences (according to the model's padding side and padding | |
index) among: | |
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single | |
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum | |
acceptable input length for the model if that argument is not provided. | |
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different | |
lengths). | |
max_length (`int`, *optional*): | |
Maximum length of the returned list and optionally padding length (see above). | |
pad_to_multiple_of (`int`, *optional*): | |
If set will pad the sequence to a multiple of the provided value. | |
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability | |
`>= 7.5` (Volta). | |
return_attention_mask (`bool`, *optional*): | |
Whether to return the attention mask. If left to the default, will return the attention mask according | |
to the specific tokenizer's default, defined by the `return_outputs` attribute. | |
[What are attention masks?](../glossary#attention-mask) | |
return_tensors (`str` or [`~utils.TensorType`], *optional*): | |
If set, will return tensors instead of list of python integers. Acceptable values are: | |
- `'tf'`: Return TensorFlow `tf.constant` objects. | |
- `'pt'`: Return PyTorch `torch.Tensor` objects. | |
- `'np'`: Return Numpy `np.ndarray` objects. | |
verbose (`bool`, *optional*, defaults to `True`): | |
Whether or not to print more information and warnings. | |
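Example (illustrative sketch; a typical use is as a `collate_fn` for a PyTorch `DataLoader`, assuming
`features` is a list of dicts produced by the tokenizer without padding and PyTorch is installed):

    batch = tokenizer.pad(features, padding=True, return_tensors="pt")
    # every sequence in `batch` is now padded to the length of the longest feature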
""" | |
if self.__class__.__name__.endswith("Fast"): | |
if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False): | |
logger.warning_advice( | |
f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer," | |
" using the `__call__` method is faster than using a method to encode the text followed by a call" | |
" to the `pad` method to get a padded encoding." | |
) | |
self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True | |
# If we have a list of dicts, let's convert it into a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader | |
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): | |
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} | |
# The model's main input name, usually `input_ids`, has to be passed for padding
if self.model_input_names[0] not in encoded_inputs: | |
raise ValueError( | |
"You should supply an encoding or a list of encodings to this method " | |
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}" | |
) | |
required_input = encoded_inputs[self.model_input_names[0]] | |
if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0): | |
if return_attention_mask: | |
encoded_inputs["attention_mask"] = [] | |
return encoded_inputs | |
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects | |
# and rebuild them afterwards if no return_tensors is specified | |
# Note that we lose the specific device the tensor may be on for PyTorch | |
first_element = required_input[0] | |
if isinstance(first_element, (list, tuple)): | |
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. | |
for item in required_input: | |
if len(item) != 0: | |
first_element = item[0] | |
break | |
# At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do. | |
if not isinstance(first_element, (int, list, tuple)): | |
if is_tf_tensor(first_element): | |
return_tensors = "tf" if return_tensors is None else return_tensors | |
elif is_torch_tensor(first_element): | |
return_tensors = "pt" if return_tensors is None else return_tensors | |
elif isinstance(first_element, np.ndarray): | |
return_tensors = "np" if return_tensors is None else return_tensors | |
else: | |
raise ValueError( | |
f"type of {first_element} unknown: {type(first_element)}. " | |
"Should be one of a python, numpy, pytorch or tensorflow object." | |
) | |
for key, value in encoded_inputs.items(): | |
encoded_inputs[key] = to_py_obj(value) | |
# Convert padding_strategy in PaddingStrategy | |
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( | |
padding=padding, max_length=max_length, verbose=verbose | |
) | |
required_input = encoded_inputs[self.model_input_names[0]] | |
if required_input and not isinstance(required_input[0], (list, tuple)): | |
encoded_inputs = self._pad( | |
encoded_inputs, | |
max_length=max_length, | |
padding_strategy=padding_strategy, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_attention_mask=return_attention_mask, | |
) | |
return BatchEncoding(encoded_inputs, tensor_type=return_tensors) | |
batch_size = len(required_input) | |
assert all( | |
len(v) == batch_size for v in encoded_inputs.values() | |
), "Some items in the output dictionary have a different batch size than others." | |
if padding_strategy == PaddingStrategy.LONGEST: | |
max_length = max(len(inputs) for inputs in required_input) | |
padding_strategy = PaddingStrategy.MAX_LENGTH | |
batch_outputs = {} | |
for i in range(batch_size): | |
inputs = {k: v[i] for k, v in encoded_inputs.items()} | |
outputs = self._pad( | |
inputs, | |
max_length=max_length, | |
padding_strategy=padding_strategy, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_attention_mask=return_attention_mask, | |
) | |
for key, value in outputs.items(): | |
if key not in batch_outputs: | |
batch_outputs[key] = [] | |
batch_outputs[key].append(value) | |
return BatchEncoding(batch_outputs, tensor_type=return_tensors) | |
def create_token_type_ids_from_sequences( | |
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None | |
) -> List[int]: | |
""" | |
Create the token type IDs corresponding to the sequences passed. [What are token type | |
IDs?](../glossary#token-type-ids) | |
Should be overridden in a subclass if the model has a special way of building those. | |
Args: | |
token_ids_0 (`List[int]`): The first tokenized sequence. | |
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. | |
Returns: | |
`List[int]`: The token type ids. | |
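Example (behavior of this base implementation; model-specific subclasses may differ):

    tokenizer.create_token_type_ids_from_sequences([1, 2], [3, 4, 5])
    # -> [0, 0, 1, 1, 1]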
""" | |
if token_ids_1 is None: | |
return len(token_ids_0) * [0] | |
return [0] * len(token_ids_0) + [1] * len(token_ids_1) | |
def build_inputs_with_special_tokens( | |
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None | |
) -> List[int]: | |
""" | |
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and | |
adding special tokens. | |
This implementation does not add special tokens and this method should be overridden in a subclass. | |
Args: | |
token_ids_0 (`List[int]`): The first tokenized sequence. | |
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence. | |
Returns: | |
`List[int]`: The model input with special tokens. | |
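Example (behavior of this base implementation, which simply concatenates the sequences):

    tokenizer.build_inputs_with_special_tokens([1, 2], [3, 4])
    # -> [1, 2, 3, 4] (model-specific subclasses insert e.g. BOS/EOS/SEP ids here)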
""" | |
if token_ids_1 is None: | |
return token_ids_0 | |
return token_ids_0 + token_ids_1 | |
def prepare_for_model( | |
self, | |
ids: List[int], | |
pair_ids: Optional[List[int]] = None, | |
add_special_tokens: bool = True, | |
padding: Union[bool, str, PaddingStrategy] = False, | |
truncation: Union[bool, str, TruncationStrategy] = None, | |
max_length: Optional[int] = None, | |
stride: int = 0, | |
pad_to_multiple_of: Optional[int] = None, | |
return_tensors: Optional[Union[str, TensorType]] = None, | |
return_token_type_ids: Optional[bool] = None, | |
return_attention_mask: Optional[bool] = None, | |
return_overflowing_tokens: bool = False, | |
return_special_tokens_mask: bool = False, | |
return_offsets_mapping: bool = False, | |
return_length: bool = False, | |
verbose: bool = True, | |
prepend_batch_axis: bool = False, | |
**kwargs, | |
) -> BatchEncoding: | |
""" | |
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user-defined stride) for overflowing tokens. Please note that for *pair_ids*
different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
overflowing tokens. Such a combination of arguments will raise an error. | |
Args: | |
ids (`List[int]`): | |
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and | |
`convert_tokens_to_ids` methods. | |
pair_ids (`List[int]`, *optional*): | |
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` | |
and `convert_tokens_to_ids` methods. | |
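Example (illustrative sketch; `tokenizer` is assumed to be an instantiated subclass):

    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
    encoding = tokenizer.prepare_for_model(ids, add_special_tokens=True)
    # encoding["input_ids"] now includes the model's special tokens around `ids`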
""" | |
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length' | |
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( | |
padding=padding, | |
truncation=truncation, | |
max_length=max_length, | |
pad_to_multiple_of=pad_to_multiple_of, | |
verbose=verbose, | |
**kwargs, | |
) | |
pair = bool(pair_ids is not None) | |
len_ids = len(ids) | |
len_pair_ids = len(pair_ids) if pair else 0 | |
if return_token_type_ids and not add_special_tokens: | |
raise ValueError( | |
"Asking to return token_type_ids while setting add_special_tokens to False " | |
"results in an undefined behavior. Please set add_special_tokens to True or " | |
"set return_token_type_ids to None." | |
) | |
if ( | |
return_overflowing_tokens | |
and truncation_strategy == TruncationStrategy.LONGEST_FIRST | |
and pair_ids is not None | |
): | |
raise ValueError( | |
"Not possible to return overflowing tokens for pair of sequences with the " | |
"`longest_first`. Please select another truncation strategy than `longest_first`, " | |
"for instance `only_second` or `only_first`." | |
) | |
# Load from model defaults | |
if return_token_type_ids is None: | |
return_token_type_ids = "token_type_ids" in self.model_input_names | |
if return_attention_mask is None: | |
return_attention_mask = "attention_mask" in self.model_input_names | |
encoded_inputs = {} | |
# Compute the total size of the returned encodings | |
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) | |
# Truncation: Handle max sequence length | |
overflowing_tokens = [] | |
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: | |
ids, pair_ids, overflowing_tokens = self.truncate_sequences( | |
ids, | |
pair_ids=pair_ids, | |
num_tokens_to_remove=total_len - max_length, | |
truncation_strategy=truncation_strategy, | |
stride=stride, | |
) | |
if return_overflowing_tokens: | |
encoded_inputs["overflowing_tokens"] = overflowing_tokens | |
encoded_inputs["num_truncated_tokens"] = total_len - max_length | |
# Add special tokens | |
if add_special_tokens: | |
sequence = self.build_inputs_with_special_tokens(ids, pair_ids) | |
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) | |
else: | |
sequence = ids + pair_ids if pair else ids | |
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) | |
# Build output dictionary | |
encoded_inputs["input_ids"] = sequence | |
if return_token_type_ids: | |
encoded_inputs["token_type_ids"] = token_type_ids | |
if return_special_tokens_mask: | |
if add_special_tokens: | |
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) | |
else: | |
encoded_inputs["special_tokens_mask"] = [0] * len(sequence) | |
# Check lengths | |
self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) | |
# Padding | |
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: | |
encoded_inputs = self.pad( | |
encoded_inputs, | |
max_length=max_length, | |
padding=padding_strategy.value, | |
pad_to_multiple_of=pad_to_multiple_of, | |
return_attention_mask=return_attention_mask, | |
) | |
if return_length: | |
encoded_inputs["length"] = len(encoded_inputs["input_ids"]) | |
batch_outputs = BatchEncoding( | |
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis | |
) | |
return batch_outputs | |
def truncate_sequences( | |
self, | |
ids: List[int], | |
pair_ids: Optional[List[int]] = None, | |
num_tokens_to_remove: int = 0, | |
truncation_strategy: Union[str, TruncationStrategy] = "longest_first", | |
stride: int = 0, | |
) -> Tuple[List[int], List[int], List[int]]: | |
""" | |
Truncates a sequence pair in-place following the strategy. | |
Args: | |
ids (`List[int]`): | |
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and | |
`convert_tokens_to_ids` methods. | |
pair_ids (`List[int]`, *optional*): | |
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` | |
and `convert_tokens_to_ids` methods. | |
num_tokens_to_remove (`int`, *optional*, defaults to 0): | |
Number of tokens to remove using the truncation strategy. | |
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`): | |
The strategy to follow for truncation. Can be: | |
- `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will truncate | |
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a | |
batch of pairs) is provided. | |
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will only | |
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. | |
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will only | |
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. | |
- `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater | |
than the model maximum admissible input size). | |
stride (`int`, *optional*, defaults to 0): | |
If set to a positive number, the overflowing tokens returned will contain some tokens from the main | |
sequence returned. The value of this argument defines the number of additional tokens. | |
Returns: | |
`Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of | |
overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair
of sequences (or a batch of pairs) is provided. | |
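Example (behavior sketch with `truncation_side="right"`, the default for most tokenizers):

    ids, pair_ids, overflow = tokenizer.truncate_sequences([1, 2, 3, 4, 5], num_tokens_to_remove=2)
    # ids -> [1, 2, 3], pair_ids -> None, overflow -> [4, 5]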
""" | |
if num_tokens_to_remove <= 0: | |
return ids, pair_ids, [] | |
if not isinstance(truncation_strategy, TruncationStrategy): | |
truncation_strategy = TruncationStrategy(truncation_strategy) | |
overflowing_tokens = [] | |
if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( | |
truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None | |
): | |
if len(ids) > num_tokens_to_remove: | |
window_len = min(len(ids), stride + num_tokens_to_remove) | |
if self.truncation_side == "left": | |
overflowing_tokens = ids[:window_len] | |
ids = ids[num_tokens_to_remove:] | |
elif self.truncation_side == "right": | |
overflowing_tokens = ids[-window_len:] | |
ids = ids[:-num_tokens_to_remove] | |
else: | |
raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.") | |
else: | |
error_msg = ( | |
f"We need to remove {num_tokens_to_remove} to truncate the input " | |
f"but the first sequence has a length {len(ids)}. " | |
) | |
if truncation_strategy == TruncationStrategy.ONLY_FIRST: | |
error_msg = ( | |
error_msg + "Please select another truncation strategy than " | |
f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." | |
) | |
logger.error(error_msg) | |
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: | |
logger.warning( | |
"Be aware, overflowing tokens are not returned for the setting you have chosen," | |
f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " | |
"truncation strategy. So the returned list will always be empty even if some " | |
"tokens have been removed." | |
) | |
len_pair_ids = len(pair_ids) if pair_ids is not None else 0 | |
len_ids = len(ids) | |
first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove) | |
second_remove = num_tokens_to_remove - first_remove | |
if len_ids > len_pair_ids: | |
ids_to_move = first_remove + second_remove // 2 | |
pair_ids_to_move = second_remove - second_remove // 2 | |
else: | |
ids_to_move = second_remove // 2 | |
pair_ids_to_move = first_remove + second_remove - (second_remove // 2) | |
if self.truncation_side == "right": | |
ids = ids[:-ids_to_move] if ids_to_move > 0 else ids | |
pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids | |
elif self.truncation_side == "left": | |
ids = ids[ids_to_move:] | |
pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None | |
else: | |
raise ValueError(f"invalid truncation strategy:{self.truncation_side}") | |
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: | |
if len(pair_ids) > num_tokens_to_remove: | |
window_len = min(len(pair_ids), stride + num_tokens_to_remove) | |
if self.truncation_side == "right": | |
overflowing_tokens = pair_ids[-window_len:] | |
pair_ids = pair_ids[:-num_tokens_to_remove] | |
elif self.truncation_side == "left": | |
overflowing_tokens = pair_ids[:window_len] | |
pair_ids = pair_ids[num_tokens_to_remove:] | |
else: | |
raise ValueError(f"invalid truncation strategy:{self.truncation_side}") | |
else: | |
logger.error( | |
f"We need to remove {num_tokens_to_remove} to truncate the input " | |
f"but the second sequence has a length {len(pair_ids)}. " | |
f"Please select another truncation strategy than {truncation_strategy}, " | |
"for instance 'longest_first' or 'only_first'." | |
) | |
return (ids, pair_ids, overflowing_tokens) | |
def _pad( | |
self, | |
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], | |
max_length: Optional[int] = None, | |
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, | |
pad_to_multiple_of: Optional[int] = None, | |
return_attention_mask: Optional[bool] = None, | |
) -> dict: | |
""" | |
Pad encoded inputs (on left/right and up to predefined length or max length in the batch) | |
Args: | |
encoded_inputs: | |
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). | |
max_length: maximum length of the returned list and optionally padding length (see below). | |
Will truncate by taking into account the special tokens. | |
padding_strategy: PaddingStrategy to use for padding. | |
- PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default) | |
- PaddingStrategy.DO_NOT_PAD: Do not pad | |
The tokenizer padding sides are defined in self.padding_side: | |
- 'left': pads on the left of the sequences | |
- 'right': pads on the right of the sequences | |
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. | |
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability | |
`>= 7.5` (Volta). | |
return_attention_mask: | |
(optional) Set to False to avoid returning attention mask (default: set to model specifics) | |
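Example (illustrative sketch; assumes right padding, `pad_token_id=0`, and an `attention_mask` model input):

    tokenizer._pad({"input_ids": [7, 8, 9]}, max_length=5, padding_strategy=PaddingStrategy.MAX_LENGTH)
    # -> {"input_ids": [7, 8, 9, 0, 0], "attention_mask": [1, 1, 1, 0, 0]}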
""" | |
# Load from model defaults | |
if return_attention_mask is None: | |
return_attention_mask = "attention_mask" in self.model_input_names | |
required_input = encoded_inputs[self.model_input_names[0]] | |
if padding_strategy == PaddingStrategy.LONGEST: | |
max_length = len(required_input) | |
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): | |
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of | |
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length | |
# Initialize attention mask if not present. | |
if return_attention_mask and "attention_mask" not in encoded_inputs: | |
encoded_inputs["attention_mask"] = [1] * len(required_input) | |
if needs_to_be_padded: | |
difference = max_length - len(required_input) | |
if self.padding_side == "right": | |
if return_attention_mask: | |
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference | |
if "token_type_ids" in encoded_inputs: | |
encoded_inputs["token_type_ids"] = ( | |
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference | |
) | |
if "special_tokens_mask" in encoded_inputs: | |
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference | |
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference | |
elif self.padding_side == "left": | |
if return_attention_mask: | |
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] | |
if "token_type_ids" in encoded_inputs: | |
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ | |
"token_type_ids" | |
] | |
if "special_tokens_mask" in encoded_inputs: | |
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] | |
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input | |
else: | |
raise ValueError(f"Invalid padding strategy:{self.padding_side}") | |
return encoded_inputs | |
def convert_tokens_to_string(self, tokens: List[str]) -> str: | |
""" | |
Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)`, but we
often want to remove sub-word tokenization artifacts at the same time.
Args: | |
tokens (`List[str]`): The tokens to join into a string.
Returns: | |
`str`: The joined tokens. | |
""" | |
raise NotImplementedError | |
def batch_decode( | |
self, | |
sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], | |
skip_special_tokens: bool = False, | |
clean_up_tokenization_spaces: bool = None, | |
**kwargs, | |
) -> List[str]: | |
""" | |
Convert a list of lists of token ids into a list of strings by calling decode. | |
Args: | |
sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): | |
List of tokenized input ids. Can be obtained using the `__call__` method. | |
skip_special_tokens (`bool`, *optional*, defaults to `False`): | |
Whether or not to remove special tokens in the decoding. | |
clean_up_tokenization_spaces (`bool`, *optional*): | |
Whether or not to clean up the tokenization spaces. If `None`, will default to | |
`self.clean_up_tokenization_spaces`. | |
kwargs (additional keyword arguments, *optional*): | |
Will be passed to the underlying model specific decode method. | |
Returns: | |
`List[str]`: The list of decoded sentences. | |
""" | |
return [ | |
self.decode( | |
seq, | |
skip_special_tokens=skip_special_tokens, | |
clean_up_tokenization_spaces=clean_up_tokenization_spaces, | |
**kwargs, | |
) | |
for seq in sequences | |
] | |
def decode( | |
self, | |
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], | |
skip_special_tokens: bool = False, | |
clean_up_tokenization_spaces: bool = None, | |
**kwargs, | |
) -> str: | |
""" | |
Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces. | |
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. | |
Args: | |
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): | |
List of tokenized input ids. Can be obtained using the `__call__` method. | |
skip_special_tokens (`bool`, *optional*, defaults to `False`): | |
Whether or not to remove special tokens in the decoding. | |
clean_up_tokenization_spaces (`bool`, *optional*): | |
Whether or not to clean up the tokenization spaces. If `None`, will default to | |
`self.clean_up_tokenization_spaces`. | |
kwargs (additional keyword arguments, *optional*): | |
Will be passed to the underlying model specific decode method. | |
Returns: | |
`str`: The decoded sentence. | |
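Example (illustrative sketch; the exact string depends on the vocabulary of the concrete tokenizer):

    ids = tokenizer.encode("Hello world")
    text = tokenizer.decode(ids, skip_special_tokens=True)
    # `text` is "Hello world" again (up to tokenization round-trip artifacts)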
""" | |
# Convert inputs to python lists | |
token_ids = to_py_obj(token_ids) | |
return self._decode( | |
token_ids=token_ids, | |
skip_special_tokens=skip_special_tokens, | |
clean_up_tokenization_spaces=clean_up_tokenization_spaces, | |
**kwargs, | |
) | |
def _decode( | |
self, | |
token_ids: Union[int, List[int]], | |
skip_special_tokens: bool = False, | |
clean_up_tokenization_spaces: bool = None, | |
**kwargs, | |
) -> str: | |
raise NotImplementedError | |
def get_special_tokens_mask( | |
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False | |
) -> List[int]: | |
""" | |
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding | |
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. | |
Args: | |
token_ids_0 (`List[int]`): | |
List of ids of the first sequence. | |
token_ids_1 (`List[int]`, *optional*): | |
List of ids of the second sequence. | |
already_has_special_tokens (`bool`, *optional*, defaults to `False`): | |
Whether or not the token list is already formatted with special tokens for the model. | |
Returns: | |
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. | |
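Example (illustrative sketch; this base implementation only supports `already_has_special_tokens=True`):

    ids = tokenizer.encode("Hello world", add_special_tokens=True)
    mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
    # mask[i] is 1 where ids[i] is a special token (e.g. BOS/EOS), 0 otherwise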
""" | |
assert already_has_special_tokens and token_ids_1 is None, ( | |
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. " | |
"Please use a slow (full python) tokenizer to activate this argument. " | |
"Or set `return_special_tokens_mask=True` when calling the encoding method " | |
"to get the special tokens mask in any tokenizer. " | |
) | |
all_special_ids = self.all_special_ids # cache the property | |
special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0] | |
return special_tokens_mask | |
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
""" | |
Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
Args: | |
out_string (`str`): The text to clean up. | |
Returns: | |
`str`: The cleaned-up string. | |
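Example (the input string is illustrative; the method only applies the fixed set of replacements below):
```python
>>> from transformers import PreTrainedTokenizerBase

>>> PreTrainedTokenizerBase.clean_up_tokenization("Do n't worry , it 's fine !")
"Don't worry, it's fine!"
```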
""" | |
out_string = ( | |
out_string.replace(" .", ".") | |
.replace(" ?", "?") | |
.replace(" !", "!") | |
.replace(" ,", ",") | |
.replace(" ' ", "'") | |
.replace(" n't", "n't") | |
.replace(" 'm", "'m") | |
.replace(" 's", "'s") | |
.replace(" 've", "'ve") | |
.replace(" 're", "'re") | |
) | |
return out_string | |
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool): | |
""" | |
Depending on the input and internal state we might trigger a warning about a sequence that is too long for its | |
corresponding model.
Args:
ids (`List[int]`): The ids produced by the tokenization
max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set) | |
verbose (`bool`): Whether or not to print more information and warnings. | |
""" | |
if max_length is None and len(ids) > self.model_max_length and verbose: | |
if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False): | |
logger.warning( | |
"Token indices sequence length is longer than the specified maximum sequence length " | |
f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model " | |
"will result in indexing errors" | |
) | |
self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True | |
def _switch_to_input_mode(self): | |
""" | |
Private method to put the tokenizer in input mode (when it has different modes for input/outputs) | |
""" | |
pass | |
def _switch_to_target_mode(self): | |
""" | |
Private method to put the tokenizer in target mode (when it has different modes for input/outputs) | |
""" | |
pass | |
@contextmanager
def as_target_tokenizer(self):
""" | |
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
sequence-to-sequence models that need a slightly different processing for the labels. | |
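Example (a minimal sketch; the checkpoint and texts are illustrative, any sequence-to-sequence tokenizer works):
```python
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")  # assumed checkpoint
>>> with tokenizer.as_target_tokenizer():  # deprecated pattern, shown for reference
...     labels = tokenizer(["Das Haus ist wunderbar."])

>>> labels = tokenizer(text_target=["Das Haus ist wunderbar."])  # recommended replacement
```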
""" | |
warnings.warn( | |
"`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your " | |
"labels by using the argument `text_target` of the regular `__call__` method (either in the same call as " | |
"your input texts if you use the same keyword arguments, or in a separate call)."
) | |
self._switch_to_target_mode() | |
self._in_target_context_manager = True | |
yield | |
self._in_target_context_manager = False | |
self._switch_to_input_mode() | |
@classmethod
def register_for_auto_class(cls, auto_class="AutoTokenizer"):
""" | |
Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the | |
library are already mapped with `AutoTokenizer`. | |
<Tip warning={true}> | |
This API is experimental and may have some slight breaking changes in the next releases. | |
</Tip> | |
Args: | |
auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`): | |
The auto class to register this new tokenizer with. | |
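Example (a minimal sketch; `MyCustomTokenizer` is a hypothetical custom class):
```python
>>> from transformers import PreTrainedTokenizer

>>> class MyCustomTokenizer(PreTrainedTokenizer):  # hypothetical custom tokenizer
...     pass

>>> MyCustomTokenizer.register_for_auto_class("AutoTokenizer")
```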
""" | |
if not isinstance(auto_class, str): | |
auto_class = auto_class.__name__ | |
import transformers.models.auto as auto_module | |
if not hasattr(auto_module, auto_class): | |
raise ValueError(f"{auto_class} is not a valid auto class.") | |
cls._auto_class = auto_class | |
def prepare_seq2seq_batch( | |
self, | |
src_texts: List[str], | |
tgt_texts: Optional[List[str]] = None, | |
max_length: Optional[int] = None, | |
max_target_length: Optional[int] = None, | |
padding: str = "longest", | |
return_tensors: Optional[str] = None,
truncation: bool = True, | |
**kwargs, | |
) -> BatchEncoding: | |
""" | |
Prepare model inputs for translation. For best performance, translate one sentence at a time. | |
Args:
src_texts (`List[str]`): | |
List of documents to summarize or source language texts. | |
tgt_texts (`List[str]`, *optional*):
List of summaries or target language texts. | |
max_length (`int`, *optional*): | |
Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
left unset or set to `None`, this will use the predefined model maximum length if a maximum length is | |
required by one of the truncation/padding parameters. If the model has no specific maximum input length | |
(like XLNet) truncation/padding to a maximum length will be deactivated. | |
max_target_length (`int`, *optional*): | |
Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set
to `None`, this will use the max_length value. | |
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
return_tensors (`str` or [`~utils.TensorType`], *optional*): | |
If set, will return tensors instead of list of python integers. Acceptable values are: | |
- `'tf'`: Return TensorFlow `tf.constant` objects. | |
- `'pt'`: Return PyTorch `torch.Tensor` objects. | |
- `'np'`: Return Numpy `np.ndarray` objects. | |
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`): | |
Activates and controls truncation. Accepts the following values: | |
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or | |
to the maximum acceptable input length for the model if that argument is not provided. This will | |
truncate token by token, removing a token from the longest sequence in the pair if a pair of | |
sequences (or a batch of pairs) is provided. | |
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will only | |
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. | |
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the | |
maximum acceptable input length for the model if that argument is not provided. This will only | |
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. | |
- `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
**kwargs: | |
Additional keyword arguments passed along to `self.__call__`. | |
Return: | |
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields: | |
- **input_ids** -- List of token ids to be fed to the encoder. | |
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model. | |
- **labels** -- List of token ids for tgt_texts. | |
The full set of keys `[input_ids, attention_mask, labels]` will only be returned if tgt_texts is passed.
Otherwise, input_ids and attention_mask will be the only keys.
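Example (a deprecated-usage sketch; the checkpoint and texts are illustrative):
```python
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")  # assumed checkpoint
>>> batch = tokenizer.prepare_seq2seq_batch(
...     src_texts=["translate English to German: The house is wonderful."],
...     tgt_texts=["Das Haus ist wunderbar."],
...     return_tensors="pt",
... )
>>> # Recommended replacement with the regular __call__:
>>> batch = tokenizer(
...     ["translate English to German: The house is wonderful."],
...     text_target=["Das Haus ist wunderbar."],
...     return_tensors="pt",
... )
>>> # batch now contains input_ids, attention_mask and labels
```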
""" | |
# docstyle-ignore | |
formatted_warning = """ | |
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular | |
`__call__` method to prepare your inputs and targets. | |
Here is a short example: | |
model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...) | |
If you need to use different keyword arguments for the source and target texts, you should do two calls like
this: | |
model_inputs = tokenizer(src_texts, ...) | |
labels = tokenizer(text_target=tgt_texts, ...) | |
model_inputs["labels"] = labels["input_ids"] | |
See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice. | |
For a more complete example, see the implementation of `prepare_seq2seq_batch`. | |
""" | |
warnings.warn(formatted_warning, FutureWarning) | |
# mBART-specific kwargs that should be ignored by other models. | |
kwargs.pop("src_lang", None) | |
kwargs.pop("tgt_lang", None) | |
if max_length is None: | |
max_length = self.model_max_length | |
model_inputs = self( | |
src_texts, | |
add_special_tokens=True, | |
return_tensors=return_tensors, | |
max_length=max_length, | |
padding=padding, | |
truncation=truncation, | |
**kwargs, | |
) | |
if tgt_texts is None: | |
return model_inputs | |
# Process tgt_texts | |
if max_target_length is None: | |
max_target_length = max_length | |
with self.as_target_tokenizer(): | |
labels = self( | |
tgt_texts, | |
add_special_tokens=True, | |
return_tensors=return_tensors, | |
padding=padding, | |
max_length=max_target_length, | |
truncation=truncation, | |
**kwargs, | |
) | |
model_inputs["labels"] = labels["input_ids"] | |
return model_inputs | |
def get_fast_tokenizer_file(tokenization_files: List[str]) -> str: | |
""" | |
Get the tokenization file to use for this version of transformers. | |
Args: | |
tokenization_files (`List[str]`): The list of available tokenizer files.
Returns: | |
`str`: The tokenization file to use. | |
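Example (a minimal sketch; the versioned file names assume the `tokenizer.<version>.json` naming pattern):
```python
>>> from transformers.tokenization_utils_base import get_fast_tokenizer_file

>>> best_file = get_fast_tokenizer_file(["tokenizer.json", "tokenizer.4.0.0.json"])
>>> # best_file is "tokenizer.4.0.0.json" when the installed transformers version is at least
>>> # 4.0.0, otherwise the default "tokenizer.json" is used
```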
""" | |
tokenizer_files_map = {} | |
for file_name in tokenization_files: | |
search = _re_tokenizer_file.search(file_name) | |
if search is not None: | |
v = search.groups()[0] | |
tokenizer_files_map[v] = file_name | |
available_versions = sorted(tokenizer_files_map.keys()) | |
# Defaults to FULL_TOKENIZER_FILE and then tries to look at some newer versions.
tokenizer_file = FULL_TOKENIZER_FILE | |
transformers_version = version.parse(__version__) | |
for v in available_versions: | |
if version.parse(v) <= transformers_version: | |
tokenizer_file = tokenizer_files_map[v] | |
else: | |
# No point going further since the versions are sorted. | |
break | |
return tokenizer_file | |
# To update the docstring, we need to copy the method, otherwise we change the original docstring. | |
PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub) | |
if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None: | |
PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format( | |
object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files" | |
) | |