Dataset columns: repo_id (string, 15-86 chars), file_path (string, 28-180 chars), content (string, 1-1.75M chars), __index_level_0__ (int64, always 0)
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_decoders.py
import json
import pickle

import pytest

from tokenizers.decoders import (
    CTC,
    BPEDecoder,
    ByteLevel,
    Decoder,
    Metaspace,
    Sequence,
    WordPiece,
    ByteFallback,
    Replace,
    Strip,
    Fuse,
)


class TestByteLevel:
    def test_instantiate(self):
        assert ByteLevel() is not None
        assert isinstance(ByteLevel(), Decoder)
        assert isinstance(ByteLevel(), ByteLevel)
        assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)

    def test_decoding(self):
        decoder = ByteLevel()
        assert decoder.decode(["My", "Ġname", "Ġis", "ĠJohn"]) == "My name is John"

    def test_manual_reload(self):
        byte_level = ByteLevel()
        state = json.loads(byte_level.__getstate__())
        reloaded = ByteLevel(**state)
        assert isinstance(reloaded, ByteLevel)


class TestReplace:
    def test_instantiate(self):
        assert Replace("_", " ") is not None
        assert isinstance(Replace("_", " "), Decoder)
        assert isinstance(Replace("_", " "), Replace)
        # assert isinstance(pickle.loads(pickle.dumps(Replace("_", " "))), Replace)

    def test_decoding(self):
        decoder = Replace("_", " ")
        assert decoder.decode(["My", "_name", "_is", "_John"]) == "My name is John"


class TestWordPiece:
    def test_instantiate(self):
        assert WordPiece() is not None
        assert WordPiece(prefix="__") is not None
        assert WordPiece(cleanup=True) is not None
        assert isinstance(WordPiece(), Decoder)
        assert isinstance(WordPiece(), WordPiece)
        assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece)

    def test_decoding(self):
        decoder = WordPiece()
        assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John"
        assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John"
        decoder = WordPiece(prefix="__", cleanup=False)
        assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John"
        assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John"

    def test_can_modify(self):
        decoder = WordPiece(prefix="$$", cleanup=False)

        assert decoder.prefix == "$$"
        assert decoder.cleanup == False

        # Modify these
        decoder.prefix = "__"
        assert decoder.prefix == "__"
        decoder.cleanup = True
        assert decoder.cleanup == True


class TestByteFallback:
    def test_instantiate(self):
        assert ByteFallback() is not None
        assert isinstance(ByteFallback(), Decoder)
        assert isinstance(ByteFallback(), ByteFallback)
        assert isinstance(pickle.loads(pickle.dumps(ByteFallback())), ByteFallback)

    def test_decoding(self):
        decoder = ByteFallback()
        assert decoder.decode(["My", " na", "me"]) == "My name"
        assert decoder.decode(["<0x61>"]) == "a"
        assert decoder.decode(["<0xE5>"]) == "�"
        assert decoder.decode(["<0xE5>", "<0x8f>"]) == "��"
        assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>"]) == "叫"
        assert decoder.decode(["<0xE5>", "<0x8f>", "a"]) == "��a"
        assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>", "a"]) == "叫a"


class TestFuse:
    def test_instantiate(self):
        assert Fuse() is not None
        assert isinstance(Fuse(), Decoder)
        assert isinstance(Fuse(), Fuse)
        assert isinstance(pickle.loads(pickle.dumps(Fuse())), Fuse)

    def test_decoding(self):
        decoder = Fuse()
        assert decoder.decode(["My", " na", "me"]) == "My name"


class TestStrip:
    def test_instantiate(self):
        assert Strip(left=0, right=0) is not None
        assert isinstance(Strip(content="_", left=0, right=0), Decoder)
        assert isinstance(Strip(content="_", left=0, right=0), Strip)
        assert isinstance(pickle.loads(pickle.dumps(Strip(content="_", left=0, right=0))), Strip)

    def test_decoding(self):
        decoder = Strip(content="_", left=1, right=0)
        assert decoder.decode(["_My", " na", "me", " _-", "__-"]) == "My name _-_-"


class TestMetaspace:
    def test_instantiate(self):
        assert Metaspace() is not None
        assert Metaspace(replacement="-") is not None
        with pytest.raises(ValueError, match="expected a string of length 1"):
            Metaspace(replacement="")
        assert Metaspace(add_prefix_space=True) is not None
        assert isinstance(Metaspace(), Decoder)
        assert isinstance(Metaspace(), Metaspace)
        assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)

    def test_decoding(self):
        decoder = Metaspace()
        assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John"
        decoder = Metaspace(replacement="-", add_prefix_space=False)
        assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John"

    def test_can_modify(self):
        decoder = Metaspace(replacement="*", add_prefix_space=False)

        assert decoder.replacement == "*"
        assert decoder.add_prefix_space == False

        # Modify these
        decoder.replacement = "&"
        assert decoder.replacement == "&"
        decoder.add_prefix_space = True
        assert decoder.add_prefix_space == True


class TestBPEDecoder:
    def test_instantiate(self):
        assert BPEDecoder() is not None
        assert BPEDecoder(suffix="_") is not None
        assert isinstance(BPEDecoder(), Decoder)
        assert isinstance(BPEDecoder(), BPEDecoder)
        assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder)

    def test_decoding(self):
        decoder = BPEDecoder()
        assert decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"]) == "My name is John"
        decoder = BPEDecoder(suffix="_")
        assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John"

    def test_can_modify(self):
        decoder = BPEDecoder(suffix="123")

        assert decoder.suffix == "123"

        # Modify these
        decoder.suffix = "</w>"
        assert decoder.suffix == "</w>"


class TestCTCDecoder:
    def test_instantiate(self):
        assert CTC() is not None
        assert CTC(pad_token="[PAD]") is not None
        assert isinstance(CTC(), Decoder)
        assert isinstance(CTC(), CTC)
        assert isinstance(pickle.loads(pickle.dumps(CTC())), CTC)

    def test_decoding(self):
        decoder = CTC()
        assert (
            decoder.decode(["<pad>", "<pad>", "h", "e", "e", "l", "l", "<pad>", "l", "o", "o", "o", "<pad>"])
            == "hello"
        )
        decoder = CTC(pad_token="[PAD]")
        assert (
            decoder.decode(["[PAD]", "[PAD]", "h", "e", "e", "l", "l", "[PAD]", "l", "o", "o", "o", "[PAD]"])
            == "hello"
        )

    def test_can_modify(self):
        decoder = CTC(pad_token="[PAD]")

        assert decoder.pad_token == "[PAD]"
        assert decoder.word_delimiter_token == "|"
        assert decoder.cleanup == True

        # Modify these
        decoder.pad_token = "{pad}"
        assert decoder.pad_token == "{pad}"
        decoder.word_delimiter_token = "_"
        assert decoder.word_delimiter_token == "_"
        decoder.cleanup = False
        assert decoder.cleanup == False


class TestSequenceDecoder:
    def test_instantiate(self):
        assert Sequence([]) is not None
        assert Sequence([CTC()]) is not None
        assert isinstance(Sequence([]), Decoder)
        assert isinstance(Sequence([]), Sequence)
        serialized = pickle.dumps(Sequence([]))
        assert isinstance(pickle.loads(serialized), Sequence)

    def test_decoding(self):
        decoder = Sequence([CTC(), Metaspace()])
        initial = ["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]
        expected = "Hi you"
        assert decoder.decode(initial) == expected
0
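The test file above doubles as usage documentation for the decoders. The short sketch below is not part of the repository; it replays the same behaviours outside of pytest, with illustrative token lists rather than real model output:

# Minimal sketch: decoders turn a list of model tokens back into a readable string.
import pickle

from tokenizers.decoders import CTC, Metaspace, Sequence, WordPiece

# WordPiece strips the "##" continuation prefix and re-joins sub-words.
wordpiece = WordPiece()
print(wordpiece.decode(["My", "na", "##me", "is", "Jo", "##hn"]))  # "My name is John"

# CTC collapses repeated symbols and drops the pad token; Metaspace then
# replaces the "▁" word marker with spaces. Sequence runs them in order.
decoder = Sequence([CTC(), Metaspace()])
print(decoder.decode(["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]))  # "Hi you"

# Decoders are picklable, which is what the test_instantiate cases above check.
restored = pickle.loads(pickle.dumps(decoder))
print(restored.decode(["▁", "h", "i"]))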
hf_public_repos/tokenizers/bindings/python/py_src
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.py
__version__ = "0.13.3" from enum import Enum from typing import List, Tuple, Union Offsets = Tuple[int, int] TextInputSequence = str """A :obj:`str` that represents an input sequence """ PreTokenizedInputSequence = Union[List[str], Tuple[str]] """A pre-tokenized input sequence. Can be one of: - A :obj:`List` of :obj:`str` - A :obj:`Tuple` of :obj:`str` """ TextEncodeInput = Union[ TextInputSequence, Tuple[TextInputSequence, TextInputSequence], List[TextInputSequence], ] """Represents a textual input for encoding. Can be either: - A single sequence: :data:`~tokenizers.TextInputSequence` - A pair of sequences: - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence` - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2 """ PreTokenizedEncodeInput = Union[ PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence], List[PreTokenizedInputSequence], ] """Represents a pre-tokenized input for encoding. Can be either: - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence` - A pair of sequences: - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence` - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2 """ InputSequence = Union[TextInputSequence, PreTokenizedInputSequence] """Represents all the possible types of input sequences for encoding. Can be: - When ``is_pretokenized=False``: :data:`~TextInputSequence` - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence` """ EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput] """Represents all the possible types of input for encoding. Can be: - When ``is_pretokenized=False``: :data:`~TextEncodeInput` - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput` """ class OffsetReferential(Enum): ORIGINAL = "original" NORMALIZED = "normalized" class OffsetType(Enum): BYTE = "byte" CHAR = "char" class SplitDelimiterBehavior(Enum): REMOVED = "removed" ISOLATED = "isolated" MERGED_WITH_PREVIOUS = "merged_with_previous" MERGED_WITH_NEXT = "merged_with_next" CONTIGUOUS = "contiguous" from .tokenizers import ( AddedToken, Encoding, NormalizedString, PreTokenizedString, Regex, Token, Tokenizer, decoders, models, normalizers, pre_tokenizers, processors, trainers, ) from .implementations import ( BertWordPieceTokenizer, ByteLevelBPETokenizer, CharBPETokenizer, SentencePieceBPETokenizer, SentencePieceUnigramTokenizer, )
0
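The type aliases above describe the input shapes a Tokenizer accepts. A minimal sketch follows; the toy WordLevel vocabulary and the Whitespace pre-tokenizer are assumptions chosen for illustration, not part of the file above:

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[UNK]": 0, "my": 1, "name": 2, "is": 3, "john": 4}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# TextInputSequence: a plain str
enc = tokenizer.encode("my name is john")
print(enc.tokens)

# PreTokenizedInputSequence: a list of str, flagged with is_pretokenized=True
enc = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
print(enc.ids)

# TextEncodeInput as a pair: (sequence, pair)
enc = tokenizer.encode("my name", "is john")
print(enc.type_ids)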
hf_public_repos/tokenizers/bindings/python/py_src
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.pyi
# Generated content DO NOT EDIT class AddedToken: """ Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. It can have special options that defines the way it should behave. Args: content (:obj:`str`): The content of the token single_word (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should only match single words. If :obj:`True`, this token will never match inside of a word. For example the token ``ing`` would match on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. The notion of "`inside of a word`" is defined by the word boundaries pattern in regular expressions (ie. the token should start and end with word boundaries). lstrip (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should strip all potential whitespaces on its left side. If :obj:`True`, this token will greedily match any whitespace on its left. For example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). rstrip (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should strip all potential whitespaces on its right side. If :obj:`True`, this token will greedily match any whitespace on its right. It works just like :obj:`lstrip` but on the right. normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): Defines whether this token should match against the normalized version of the input text. For example, with the added token ``"yesterday"``, and a normalizer in charge of lowercasing the text, the token could be extract from the input ``"I saw a lion Yesterday"``. """ def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True): pass @property def content(self): """ Get the content of this :obj:`AddedToken` """ pass @property def lstrip(self): """ Get the value of the :obj:`lstrip` option """ pass @property def normalized(self): """ Get the value of the :obj:`normalized` option """ pass @property def rstrip(self): """ Get the value of the :obj:`rstrip` option """ pass @property def single_word(self): """ Get the value of the :obj:`single_word` option """ pass class Encoding: """ The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. """ @property def attention_mask(self): """ The attention mask This indicates to the LM which tokens should be attended to, and which should not. This is especially important when batching sequences, where we need to applying padding. Returns: :obj:`List[int]`: The attention mask """ pass def char_to_token(self, char_pos, sequence_index=0): """ Get the token that contains the char at the given position in the input sequence. Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the token that contains this char in the encoded sequence """ pass def char_to_word(self, char_pos, sequence_index=0): """ Get the word that contains the char at the given position in the input sequence. 
Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the word that contains this char in the input sequence """ pass @property def ids(self): """ The generated IDs The IDs are the main input to a Language Model. They are the token indices, the numerical representations that a LM understands. Returns: :obj:`List[int]`: The list of IDs """ pass @staticmethod def merge(encodings, growing_offsets=True): """ Merge the list of encodings into one final :class:`~tokenizers.Encoding` Args: encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): The list of encodings that should be merged in one growing_offsets (:obj:`bool`, defaults to :obj:`True`): Whether the offsets should accumulate while merging Returns: :class:`~tokenizers.Encoding`: The resulting Encoding """ pass @property def n_sequences(self): """ The number of sequences represented Returns: :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` """ pass @property def offsets(self): """ The offsets associated to each token These offsets let's you slice the input string, and thus retrieve the original part that led to producing the corresponding token. Returns: A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets """ pass @property def overflowing(self): """ A :obj:`List` of overflowing :class:`~tokenizers.Encoding` When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting the output into as many pieces as required to match the specified maximum length. This field lets you retrieve all the subsequent pieces. When you use pairs of sequences, the overflowing pieces will contain enough variations to cover all the possible combinations, while respecting the provided maximum length. """ pass def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"): """ Pad the :class:`~tokenizers.Encoding` at the given length Args: length (:obj:`int`): The desired length direction: (:obj:`str`, defaults to :obj:`right`): The expected padding direction. Can be either :obj:`right` or :obj:`left` pad_id (:obj:`int`, defaults to :obj:`0`): The ID corresponding to the padding token pad_type_id (:obj:`int`, defaults to :obj:`0`): The type ID corresponding to the padding token pad_token (:obj:`str`, defaults to `[PAD]`): The pad token to use """ pass @property def sequence_ids(self): """ The generated sequence indices. They represent the index of the input sequence associated to each token. The sequence id can be None if the token is not related to any input sequence, like for example with special tokens. Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. """ pass def set_sequence_id(self, sequence_id): """ Set the given sequence index Set the given sequence index for the whole range of tokens contained in this :class:`~tokenizers.Encoding`. """ pass @property def special_tokens_mask(self): """ The special token mask This indicates which tokens are special tokens, and which are not. Returns: :obj:`List[int]`: The special tokens mask """ pass def token_to_chars(self, token_index): """ Get the offsets of the token at the given index. The returned offsets are related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. 
Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` """ pass def token_to_sequence(self, token_index): """ Get the index of the sequence represented by the given token. In the general use case, this method returns :obj:`0` for a single sequence or the first sequence of a pair, and :obj:`1` for the second sequence of a pair Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The sequence id of the given token """ pass def token_to_word(self, token_index): """ Get the index of the word that contains the token in one of the input sequences. The returned word index is related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The index of the word in the relevant input sequence. """ pass @property def tokens(self): """ The generated tokens They are the string representation of the IDs. Returns: :obj:`List[str]`: The list of tokens """ pass def truncate(self, max_length, stride=0, direction="right"): """ Truncate the :class:`~tokenizers.Encoding` at the given length If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating this information is lost. It will be considered as representing a single sequence. Args: max_length (:obj:`int`): The desired length stride (:obj:`int`, defaults to :obj:`0`): The length of previous content to be included in each overflowing piece direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass @property def type_ids(self): """ The generated type IDs Generally used for tasks like sequence classification or question answering, these tokens let the LM know which input sequence corresponds to each tokens. Returns: :obj:`List[int]`: The list of type ids """ pass @property def word_ids(self): """ The generated word indices. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass def word_to_chars(self, word_index, sequence_index=0): """ Get the offsets of the word at the given index in one of the input sequences. Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` """ pass def word_to_tokens(self, word_index, sequence_index=0): """ Get the encoded tokens corresponding to the word at the given index in one of the input sequences. Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` """ pass @property def words(self): """ The generated word indices. .. 
warning:: This is deprecated and will be removed in a future version. Please use :obj:`~tokenizers.Encoding.word_ids` instead. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass class NormalizedString: """ NormalizedString A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. While making all the requested modifications, it keeps track of the alignment information between the two versions of the string. Args: sequence: str: The string sequence used to initialize this NormalizedString """ def append(self, s): """ Append the given sequence to the string """ pass def clear(self): """ Clears the string """ pass def filter(self, func): """ Filter each character of the string using the given func """ pass def for_each(self, func): """ Calls the given function for each character of the string """ pass def lowercase(self): """ Lowercase the string """ pass def lstrip(self): """ Strip the left of the string """ pass def map(self, func): """ Calls the given function for each character of the string Replaces each character of the string using the returned value. Each returned value **must** be a str of length 1 (ie a character). """ pass def nfc(self): """ Runs the NFC normalization """ pass def nfd(self): """ Runs the NFD normalization """ pass def nfkc(self): """ Runs the NFKC normalization """ pass def nfkd(self): """ Runs the NFKD normalization """ pass @property def normalized(self): """ The normalized part of the string """ pass def prepend(self, s): """ Prepend the given sequence to the string """ pass def replace(self, pattern, content): """ Replace the content of the given pattern with the provided content Args: pattern: Pattern: A pattern used to match the string. Usually a string or a Regex content: str: The content to be used as replacement """ pass def rstrip(self): """ Strip the right of the string """ pass def slice(self, range): """ Slice the string using the given range """ pass def split(self, pattern, behavior): """ Split the NormalizedString using the given pattern and the specified behavior Args: pattern: Pattern: A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` behavior: SplitDelimiterBehavior: The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" Returns: A list of NormalizedString, representing each split """ pass def strip(self): """ Strip both ends of the string """ pass def uppercase(self): """ Uppercase the string """ pass class PreTokenizedString: """ PreTokenizedString Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the underlying string, while keeping track of the alignment information (offsets). The PreTokenizedString manages what we call `splits`. Each split represents a substring which is a subpart of the original string, with the relevant offsets and tokens. 
When calling one of the methods used to modify the PreTokenizedString (namely one of `split`, `normalize` or `tokenize), only the `splits` that don't have any associated tokens will get modified. Args: sequence: str: The string sequence used to initialize this PreTokenizedString """ def __init__(self, sequence): pass def get_splits(self, offset_referential="original", offset_type="char"): """ Get the splits currently managed by the PreTokenizedString Args: offset_referential: :obj:`str` Whether the returned splits should have offsets expressed relative to the original string, or the normalized one. choices: "original", "normalized". offset_type: :obj:`str` Whether the returned splits should have offsets expressed in bytes or chars. When slicing an str, we usually want to use chars, which is the default value. Now in some cases it might be interesting to get these offsets expressed in bytes, so it is possible to change this here. choices: "char", "bytes" Returns A list of splits """ pass def normalize(self, func): """ Normalize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[NormalizedString], None]: The function used to normalize each underlying split. This function does not need to return anything, just calling the methods on the provided NormalizedString allow its modification. """ pass def split(self, func): """ Split the PreTokenizedString using the given `func` Args: func: Callable[[index, NormalizedString], List[NormalizedString]]: The function used to split each underlying split. It is expected to return a list of `NormalizedString`, that represent the new splits. If the given `NormalizedString` does not need any splitting, we can just return it directly. In order for the offsets to be tracked accurately, any returned `NormalizedString` should come from calling either `.split` or `.slice` on the received one. """ pass def to_encoding(self, type_id=0, word_idx=None): """ Return an Encoding generated from this PreTokenizedString Args: type_id: int = 0: The type_id to be used on the generated Encoding. word_idx: Optional[int] = None: An optional word index to be used for each token of this Encoding. If provided, all the word indices in the generated Encoding will use this value, instead of the one automatically tracked during pre-tokenization. Returns: An Encoding """ pass def tokenize(self, func): """ Tokenize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[str], List[Token]]: The function used to tokenize each underlying split. This function must return a list of Token generated from the input str. """ pass class Regex: """ Instantiate a new Regex with the given pattern """ def __init__(self, pattern): pass class Token: pass class Tokenizer: """ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input and outputs an :class:`~tokenizers.Encoding`. Args: model (:class:`~tokenizers.models.Model`): The core algorithm that this :obj:`Tokenizer` should be using. """ def __init__(self, model): pass def add_special_tokens(self, tokens): """ Add the given special tokens to the Tokenizer. If these tokens are already part of the vocabulary, it just let the Tokenizer know about them. If they don't exist, the Tokenizer creates them, giving them a new id. These special tokens will never be processed by the model (ie won't be split into multiple tokens), and they can be removed from the output when decoding. 
Args: tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): The list of special tokens we want to add to the vocabulary. Each token can either be a string or an instance of :class:`~tokenizers.AddedToken` for more customization. Returns: :obj:`int`: The number of tokens that were created in the vocabulary """ pass def add_tokens(self, tokens): """ Add the given tokens to the vocabulary The given tokens are added only if they don't already exist in the vocabulary. Each token then gets a new attributed id. Args: tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): The list of tokens we want to add to the vocabulary. Each token can be either a string or an instance of :class:`~tokenizers.AddedToken` for more customization. Returns: :obj:`int`: The number of tokens that were created in the vocabulary """ pass def decode(self, ids, skip_special_tokens=True): """ Decode the given list of ids back to a string This is used to decode anything coming back from a Language Model Args: ids (A :obj:`List/Tuple` of :obj:`int`): The list of ids that we want to decode skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether the special tokens should be removed from the decoded string Returns: :obj:`str`: The decoded string """ pass def decode_batch(self, sequences, skip_special_tokens=True): """ Decode a batch of ids back to their corresponding string Args: sequences (:obj:`List` of :obj:`List[int]`): The batch of sequences we want to decode skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether the special tokens should be removed from the decoded strings Returns: :obj:`List[str]`: A list of decoded strings """ pass @property def decoder(self): """ The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer """ pass def enable_padding( self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None ): """ Enable the padding Args: direction (:obj:`str`, `optional`, defaults to :obj:`right`): The direction in which to pad. Can be either ``right`` or ``left`` pad_to_multiple_of (:obj:`int`, `optional`): If specified, the padding length should always snap to the next multiple of the given value. For example if we were going to pad witha length of 250 but ``pad_to_multiple_of=8`` then we will pad to 256. pad_id (:obj:`int`, defaults to 0): The id to be used when padding pad_type_id (:obj:`int`, defaults to 0): The type id to be used when padding pad_token (:obj:`str`, defaults to :obj:`[PAD]`): The pad token to be used when padding length (:obj:`int`, `optional`): If specified, the length at which to pad. If not specified we pad using the size of the longest sequence in a batch. """ pass def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"): """ Enable truncation Args: max_length (:obj:`int`): The max length at which to truncate stride (:obj:`int`, `optional`): The length of the previous first sequence to be included in the overflowing sequence strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or ``only_second``. direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True): """ Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. 
Example: Here are some examples of the inputs that are accepted:: encode("A single sequence")` encode("A sequence", "And its pair")` encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` encode( [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], is_pretokenized=True ) Args: sequence (:obj:`~tokenizers.InputSequence`): The main input sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` pair (:obj:`~tokenizers.InputSequence`, `optional`): An optional input sequence. The expected format is the same that for ``sequence``. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The encoded result """ pass def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True): """ Encode the given batch of inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode_batch([ "A single sequence", ("A tuple with a sequence", "And its pair"), [ "A", "pre", "tokenized", "sequence" ], ([ "A", "pre", "tokenized", "sequence" ], "And its pair") ]) Args: input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): A list of single sequences or pair sequences to encode. Each sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch """ pass @staticmethod def from_buffer(buffer): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. Args: buffer (:obj:`bytes`): A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_file(path): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. Args: path (:obj:`str`): A path to a local JSON file representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_pretrained(identifier, revision="main", auth_token=None): """ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the Hugging Face Hub. Args: identifier (:obj:`str`): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file revision (:obj:`str`, defaults to `main`): A branch or commit id auth_token (:obj:`str`, `optional`, defaults to `None`): An optional auth token used to access private repositories on the Hugging Face Hub Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_str(json): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. 
Args: json (:obj:`str`): A valid JSON string representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass def get_vocab(self, with_added_tokens=True): """ Get the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`Dict[str, int]`: The vocabulary """ pass def get_vocab_size(self, with_added_tokens=True): """ Get the size of the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`int`: The size of the vocabulary """ pass def id_to_token(self, id): """ Convert the given id to its corresponding token if it exists Args: id (:obj:`int`): The id to convert Returns: :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary """ pass @property def model(self): """ The :class:`~tokenizers.models.Model` in use by the Tokenizer """ pass def no_padding(self): """ Disable padding """ pass def no_truncation(self): """ Disable truncation """ pass @property def normalizer(self): """ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer """ pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ pass @property def padding(self): """ Get the current padding parameters `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current padding parameters if padding is enabled """ pass def post_process(self, encoding, pair=None, add_special_tokens=True): """ Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to the set truncation params (provided with :meth:`~tokenizers.Tokenizer.enable_truncation`) 2. Apply the :class:`~tokenizers.processors.PostProcessor` 3. Pad according to the set padding params (provided with :meth:`~tokenizers.Tokenizer.enable_padding`) Args: encoding (:class:`~tokenizers.Encoding`): The :class:`~tokenizers.Encoding` corresponding to the main sequence. pair (:class:`~tokenizers.Encoding`, `optional`): An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. add_special_tokens (:obj:`bool`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The final post-processed encoding """ pass @property def post_processor(self): """ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer """ pass @property def pre_tokenizer(self): """ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer """ pass def save(self, path, pretty=True): """ Save the :class:`~tokenizers.Tokenizer` to the file at the given path. Args: path (:obj:`str`): A path to a file in which to save the serialized tokenizer. pretty (:obj:`bool`, defaults to :obj:`True`): Whether the JSON file should be pretty formatted. """ pass def to_str(self, pretty=False): """ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. Args: pretty (:obj:`bool`, defaults to :obj:`False`): Whether the JSON string should be pretty formatted. 
Returns: :obj:`str`: A string representing the serialized Tokenizer """ pass def token_to_id(self, token): """ Convert the given token to its corresponding id if it exists Args: token (:obj:`str`): The token to convert Returns: :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary """ pass def train(self, files, trainer=None): """ Train the Tokenizer using the given files. Reads the files line by line, while keeping all the whitespace, even new lines. If you want to train from data store in-memory, you can check :meth:`~tokenizers.Tokenizer.train_from_iterator` Args: files (:obj:`List[str]`): A list of path to the files that we should use for training trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model """ pass def train_from_iterator(self, iterator, trainer=None, length=None): """ Train the Tokenizer using the provided iterator. You can provide anything that is a Python Iterator * A list of sequences :obj:`List[str]` * A generator that yields :obj:`str` or :obj:`List[str]` * A Numpy array of strings * ... Args: iterator (:obj:`Iterator`): Any iterator over strings or list of strings trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ pass @property def truncation(self): """ Get the currently set truncation parameters `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current truncation parameters if truncation is enabled """ pass
0
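The stubs above document the Tokenizer pipeline (truncation, padding, batch encoding) and the Encoding accessors. A minimal sketch of that flow follows; the toy vocabulary and the Whitespace pre-tokenizer are assumptions for illustration only:

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[UNK]": 0, "[PAD]": 1, "hello": 2, "world": 3, "again": 4}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# Configure the pipeline as described in the docstrings above.
tokenizer.enable_truncation(max_length=4)
tokenizer.enable_padding(pad_id=1, pad_token="[PAD]", length=4)

encodings = tokenizer.encode_batch(["hello world", "hello world again"])
for enc in encodings:
    print(enc.tokens, enc.ids, enc.attention_mask)

# Map tokens back to the original text.
enc = encodings[1]
print(enc.offsets)           # one (start, end) pair per token
print(enc.token_to_word(2))  # index of the word containing token 2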
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py
from .. import normalizers

Normalizer = normalizers.Normalizer
BertNormalizer = normalizers.BertNormalizer
NFD = normalizers.NFD
NFKD = normalizers.NFKD
NFC = normalizers.NFC
NFKC = normalizers.NFKC
Sequence = normalizers.Sequence
Lowercase = normalizers.Lowercase
Prepend = normalizers.Prepend
Strip = normalizers.Strip
StripAccents = normalizers.StripAccents
Nmt = normalizers.Nmt
Precompiled = normalizers.Precompiled
Replace = normalizers.Replace

NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}


def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
    if normalizer not in NORMALIZERS:
        raise ValueError(
            "{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
        )

    return NORMALIZERS[normalizer]()
0
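The helper above looks a unicode normalizer up by name. A short sketch of how it combines with the re-exported normalizer classes (the input strings are illustrative):

from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str

# Pick a unicode normalizer by name; unknown names raise ValueError.
nfkc = unicode_normalizer_from_str("nfkc")
print(nfkc.normalize_str("ﬁne"))  # the "ﬁ" ligature becomes "fi"

# Chain it with other normalizers, run in the given order.
normalizer = Sequence([nfkc, Lowercase()])
print(normalizer.normalize_str("Ｈéllo"))  # fullwidth "Ｈ" is folded, then lowercased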
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi
# Generated content DO NOT EDIT class Normalizer: """ Base class for all normalizers This class is not supposed to be instantiated directly. Instead, any implementation of a Normalizer will return an instance of this class when instantiated. """ def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class BertNormalizer(Normalizer): """ BertNormalizer Takes care of normalizing raw text before giving it to a Bert model. This includes cleaning the text, handling accents, chinese chars and lowercasing Args: clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to clean the text, by removing any control characters and replacing all whitespaces by the classic one. handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to handle chinese chars by putting spaces around them. strip_accents (:obj:`bool`, `optional`): Whether to strip all accents. If this option is not specified (ie == None), then it will be determined by the value for `lowercase` (as in the original Bert). lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase. """ def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Lowercase(Normalizer): """ Lowercase Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFC(Normalizer): """ NFC Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFD(Normalizer): """ NFD Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFKC(Normalizer): """ NFKC Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFKD(Normalizer): """ NFKD Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Nmt(Normalizer): """ Nmt normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Precompiled(Normalizer): """ Precompiled normalizer Don't use manually it is used for compatiblity for SentencePiece. """ def __init__(self, precompiled_charsmap): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Prepend(Normalizer): """ Prepend normalizer """ def __init__(self, prepend): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Replace(Normalizer): """ Replace normalizer """ def __init__(self, pattern, content): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Sequence(Normalizer): """ Allows concatenating multiple other Normalizer as a Sequence. All the normalizers run in sequence in the given order Args: normalizers (:obj:`List[Normalizer]`): A list of Normalizer to be run as a sequence """ def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Strip(Normalizer): """ Strip normalizer """ def __init__(self, left=True, right=True): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class StripAccents(Normalizer): """ StripAccents normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass
0
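The stubs above distinguish normalize_str (quick string-in, string-out) from normalize (in-place on a NormalizedString, keeping alignment). A minimal sketch; the sample text is made up, and the NormalizedString constructor is used as documented in the stubs earlier in this dump:

from tokenizers import NormalizedString
from tokenizers.normalizers import BertNormalizer

normalizer = BertNormalizer(lowercase=True, strip_accents=True)

# normalize_str: see the result directly, no offset tracking.
print(normalizer.normalize_str("Héllo Wörld"))  # "hello world"

# normalize: modifies a NormalizedString in place and keeps alignment info.
ns = NormalizedString("Héllo Wörld")
normalizer.normalize(ns)
print(ns.normalized)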
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.py
# Generated content DO NOT EDIT
from .. import models

Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
0
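These re-exported model classes are the core tokenization algorithms. A minimal sketch of constructing them directly (the tiny vocabulary is an assumption for illustration):

from tokenizers import Tokenizer
from tokenizers.models import BPE, WordLevel

# An empty BPE model; a vocab and merges list can also be passed in,
# or loaded from files as shown in the BPE docstrings below.
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))

# WordLevel is the simplest model: a direct token -> id mapping.
wordlevel = WordLevel({"[UNK]": 0, "hello": 1}, unk_token="[UNK]")
print(wordlevel.token_to_id("hello"))  # 1
print(wordlevel.id_to_token(0))        # "[UNK]"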
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi
# Generated content DO NOT EDIT class Model: """ Base class for all models The model represents the actual tokenization algorithm. This is the part that will contain and manage the learned vocabulary. This class cannot be constructed directly. Please use one of the concrete models. """ def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class BPE(Model): """ An implementation of the BPE (Byte-Pair Encoding) algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` merges (:obj:`List[Tuple[str, str]]`, `optional`): A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` cache_capacity (:obj:`int`, `optional`): The number of words that the BPE cache can contain. The cache allows to speed-up the process by keeping the result of the merge operations for a number of words. dropout (:obj:`float`, `optional`): A float between 0 and 1 that represents the BPE dropout to use. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. continuing_subword_prefix (:obj:`str`, `optional`): The prefix to attach to subword units that don't represent a beginning of word. end_of_word_suffix (:obj:`str`, `optional`): The suffix to attach to subword units that represent an end of word. fuse_unk (:obj:`bool`, `optional`): Whether to fuse any subsequent unknown tokens into a single one byte_fallback (:obj:`bool`, `optional`): Whether to use spm byte-fallback trick (defaults to False) """ def __init__( self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False, ): pass @staticmethod def from_file(cls, vocab, merge, **kwargs): """ Instantiate a BPE model from the given files. 
This method is roughly equivalent to doing:: vocab, merges = BPE.read_file(vocab_filename, merges_filename) bpe = BPE(vocab, merges) If you don't need to keep the :obj:`vocab, merges` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(self, vocab, merges): """ Read a :obj:`vocab.json` and a :obj:`merges.txt` files This method provides a way to read and parse the content of these files, returning the relevant data structures. If you want to instantiate some BPE models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: A :obj:`Tuple` with the vocab and the merges: The vocabulary and merges loaded into memory """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class Unigram(Model): """ An implementation of the Unigram algorithm Args: vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): A list of vocabulary items and their relative score [("am", -0.2442),...] """ def __init__(self, vocab, unk_id, byte_fallback): pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordLevel(Model): """ An implementation of the WordLevel algorithm Most simple tokenizer model based on mapping tokens to their corresponding id. Args: vocab (:obj:`str`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ def __init__(self, vocab, unk_token): pass @staticmethod def from_file(vocab, unk_token): """ Instantiate a WordLevel model from the given file This method is roughly equivalent to doing:: vocab = WordLevel.read_file(vocab_filename) wordlevel = WordLevel(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to initialize a :class:`~tokenizers.models.WordLevel` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.json` This method provides a way to read and parse the content of a vocabulary file, returning the relevant data structures. If you want to instantiate some WordLevel models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordPiece(Model): """ An implementation of the WordPiece algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. max_input_chars_per_word (:obj:`int`, `optional`): The maximum number of characters to authorize in a single word. """ def __init__(self, vocab, unk_token, max_input_chars_per_word): pass @staticmethod def from_file(vocab, **kwargs): """ Instantiate a WordPiece model from the given file This method is roughly equivalent to doing:: vocab = WordPiece.read_file(vocab_filename) wordpiece = WordPiece(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to initialize a :class:`~tokenizers.models.WordPiece` Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.txt` file This method provides a way to read and parse the content of a standard `vocab.txt` file as used by the WordPiece Model, returning the relevant data structures. If you want to instantiate some WordPiece models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass
0
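To make the Model interface documented above concrete, the following sketch builds WordLevel and BPE models from in-memory data; the tiny vocabularies and merges are invented for the example, and the produced Token objects are inspected through their value attribute.

# Sketch: exercising the Model API with toy, in-memory vocabularies.
from tokenizers.models import BPE, WordLevel

word_level = WordLevel(vocab={"my": 0, "name": 1, "[UNK]": 2}, unk_token="[UNK]")
print(word_level.token_to_id("name"))  # expected: 1
print(word_level.id_to_token(0))       # expected: "my"

# BPE takes a vocab plus an ordered list of merges.
bpe = BPE(vocab={"a": 0, "b": 1, "ab": 2}, merges=[("a", "b")])
print([token.value for token in bpe.tokenize("ab")])  # expected: ["ab"]

In practice these models are usually wrapped in a Tokenizer together with a pre-tokenizer rather than called directly.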
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py
from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py
import json import os from typing import Iterator, List, Optional, Union, Tuple from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.models import Unigram from .base_tokenizer import BaseTokenizer class SentencePieceUnigramTokenizer(BaseTokenizer): """SentencePiece Unigram Tokenizer Represents the Unigram algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[List[Tuple[str, float]]] = None, replacement: str = "▁", add_prefix_space: bool = True, ): if vocab is not None: # Let Unigram(..) fail if only one of them is None tokenizer = Tokenizer(Unigram(vocab)) else: tokenizer = Tokenizer(Unigram()) tokenizer.normalizer = normalizers.Sequence( [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")] ) tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(tokenizer, parameters) def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, ): """ Train the model using the given files Args: files (:obj:`List[str]`): A list of path to the files that we should use for training vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, length: Optional[int] = None, ): """ Train the model using the given iterator Args: iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`): Any iterator over strings or list of strings vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. 
unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, ) @staticmethod def from_spm(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) precompiled_charsmap = m.normalizer_spec.precompiled_charsmap vocab = [(piece.piece, piece.score) for piece in m.pieces] unk_id = m.trainer_spec.unk_id model_type = m.trainer_spec.model_type byte_fallback = m.trainer_spec.byte_fallback if model_type != 1: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) replacement = "▁" add_prefix_space = True tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) tokenizer.normalizer = normalizers.Sequence( [ normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(" {2,}"), " "), ] ) tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceUnigram", } obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) BaseTokenizer.__init__(obj, tokenizer, parameters) return obj
0
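A minimal training sketch for the SentencePieceUnigramTokenizer defined above, using train_from_iterator; the three-sentence corpus is a toy stand-in, so the learned pieces are only illustrative.

# Sketch: training a SentencePiece-style Unigram tokenizer from an in-memory iterator (toy corpus).
from tokenizers.implementations import SentencePieceUnigramTokenizer

corpus = ["A first sentence", "Another sentence", "And a last one"]

tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(corpus, show_progress=False)

# The resulting pieces depend entirely on the toy corpus above.
print(tokenizer.encode("A sentence").tokens)

from_spm covers the other direction, loading an existing sentencepiece model file instead of training from scratch.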
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py
from typing import Dict, List, Optional, Tuple, Union from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer from tokenizers.decoders import Decoder from tokenizers.models import Model from tokenizers.normalizers import Normalizer from tokenizers.pre_tokenizers import PreTokenizer from tokenizers.processors import PostProcessor Offsets = Tuple[int, int] class BaseTokenizer: def __init__(self, tokenizer: Tokenizer, parameters=None): self._tokenizer = tokenizer self._parameters = parameters if parameters is not None else {} def __repr__(self): return "Tokenizer(vocabulary_size={}, {})".format( self._tokenizer.get_vocab_size(), ", ".join(k + "=" + str(v) for k, v in self._parameters.items()), ) def num_special_tokens_to_add(self, is_pair: bool) -> int: """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ return self._tokenizer.num_special_tokens_to_add(is_pair) def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]: """Returns the vocabulary Args: with_added_tokens: boolean: Whether to include the added tokens in the vocabulary Returns: The vocabulary """ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens) def get_vocab_size(self, with_added_tokens: bool = True) -> int: """Return the size of vocabulary, with or without added tokens. Args: with_added_tokens: (`optional`) bool: Whether to count in added special tokens or not Returns: Size of vocabulary """ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens) def enable_padding( self, direction: Optional[str] = "right", pad_to_multiple_of: Optional[int] = None, pad_id: Optional[int] = 0, pad_type_id: Optional[int] = 0, pad_token: Optional[str] = "[PAD]", length: Optional[int] = None, ): """Change the padding strategy Args: direction: (`optional`) str: Can be one of: `right` or `left` pad_to_multiple_of: (`optional`) unsigned int: If specified, the padding length should always snap to the next multiple of the given value. For example if we were going to pad with a length of 250 but `pad_to_multiple_of=8` then we will pad to 256. pad_id: (`optional`) unsigned int: The indice to be used when padding pad_type_id: (`optional`) unsigned int: The type indice to be used when padding pad_token: (`optional`) str: The pad token to be used when padding length: (`optional`) unsigned int: If specified, the length at which to pad. If not specified we pad using the size of the longest sequence in a batch """ return self._tokenizer.enable_padding( direction=direction, pad_to_multiple_of=pad_to_multiple_of, pad_id=pad_id, pad_type_id=pad_type_id, pad_token=pad_token, length=length, ) def no_padding(self): """Disable padding""" return self._tokenizer.no_padding() @property def padding(self) -> Optional[dict]: """Get the current padding parameters Returns: None if padding is disabled, a dict with the currently set parameters if the padding is enabled. 
""" return self._tokenizer.padding def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"): """Change the truncation options Args: max_length: unsigned int: The maximum length at which to truncate stride: (`optional`) unsigned int: The length of the previous first sequence to be included in the overflowing sequence strategy: (`optional`) str: Can be one of `longest_first`, `only_first` or `only_second` """ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy) def no_truncation(self): """Disable truncation""" return self._tokenizer.no_truncation() @property def truncation(self) -> Optional[dict]: """Get the current truncation parameters Returns: None if truncation is disabled, a dict with the current truncation parameters if truncation is enabled """ return self._tokenizer.truncation def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int: """Add the given tokens to the vocabulary Args: tokens: List[Union[str, AddedToken]]: A list of tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_tokens(tokens) def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int: """Add the given special tokens to the vocabulary, and treat them as special tokens. The special tokens will never be processed by the model, and will be removed while decoding. Args: tokens: List[Union[str, AddedToken]]: A list of special tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_special_tokens(special_tokens) def normalize(self, sequence: str) -> str: """Normalize the given sequence Args: sequence: str: The sequence to normalize Returns: The normalized string """ return self._tokenizer.normalize(sequence) def encode( self, sequence: InputSequence, pair: Optional[InputSequence] = None, is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> Encoding: """Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Args: sequence: InputSequence: The sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: An Encoding """ if sequence is None: raise ValueError("encode: `sequence` can't be `None`") return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens) def encode_batch( self, inputs: List[EncodeInput], is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> List[Encoding]: """Encode the given inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Args: inputs: List[EncodeInput]: A list of single sequences or pair sequences to encode. 
Each `EncodeInput` is expected to be of the following form: `Union[InputSequence, Tuple[InputSequence, InputSequence]]` Each `InputSequence` can either be raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: A list of Encoding """ if inputs is None: raise ValueError("encode_batch: `inputs` can't be `None`") return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens) def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str: """Decode the given list of ids to a string sequence Args: ids: List[unsigned int]: A list of ids to be decoded skip_special_tokens: (`optional`) boolean: Whether to remove all the special tokens from the output string Returns: The decoded string """ if ids is None: raise ValueError("None input is not valid. Should be a list of integers.") return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens) def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> str: """Decode the list of sequences to a list of string sequences Args: sequences: List[List[unsigned int]]: A list of sequence of ids to be decoded skip_special_tokens: (`optional`) boolean: Whether to remove all the special tokens from the output strings Returns: A list of decoded strings """ if sequences is None: raise ValueError("None input is not valid. Should be list of list of integers.") return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens) def token_to_id(self, token: str) -> Optional[int]: """Convert the given token to its corresponding id Args: token: str: The token to convert Returns: The corresponding id if it exists, None otherwise """ return self._tokenizer.token_to_id(token) def id_to_token(self, id: int) -> Optional[str]: """Convert the given token id to its corresponding string Args: token: id: The token id to convert Returns: The corresponding string if it exists, None otherwise """ return self._tokenizer.id_to_token(id) def save_model(self, directory: str, prefix: Optional[str] = None): """Save the current model to the given directory Args: directory: str: A path to the destination directory prefix: (Optional) str: An optional prefix, used to prefix each file name """ return self._tokenizer.model.save(directory, prefix=prefix) def save(self, path: str, pretty: bool = True): """Save the current Tokenizer at the given path Args: path: str: A path to the destination Tokenizer file """ return self._tokenizer.save(path, pretty) def to_str(self, pretty: bool = False): """Get a serialized JSON version of the Tokenizer as a str Args: pretty: bool: Whether the JSON string should be prettified Returns: str """ return self._tokenizer.to_str(pretty) def post_process( self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True ) -> Encoding: """Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to global params (provided to `enable_truncation`) 2. Apply the PostProcessor 3. Pad according to global params. 
(provided to `enable_padding`) Args: encoding: Encoding: The main Encoding to post process pair: Optional[Encoding]: An optional pair Encoding add_special_tokens: bool: Whether to add special tokens Returns: The resulting Encoding """ return self._tokenizer.post_process(encoding, pair, add_special_tokens) @property def model(self) -> Model: return self._tokenizer.model @model.setter def model(self, model: Model): self._tokenizer.model = model @property def normalizer(self) -> Normalizer: return self._tokenizer.normalizer @normalizer.setter def normalizer(self, normalizer: Normalizer): self._tokenizer.normalizer = normalizer @property def pre_tokenizer(self) -> PreTokenizer: return self._tokenizer.pre_tokenizer @pre_tokenizer.setter def pre_tokenizer(self, pre_tokenizer: PreTokenizer): self._tokenizer.pre_tokenizer = pre_tokenizer @property def post_processor(self) -> PostProcessor: return self._tokenizer.post_processor @post_processor.setter def post_processor(self, post_processor: PostProcessor): self._tokenizer.post_processor = post_processor @property def decoder(self) -> Decoder: return self._tokenizer.decoder @decoder.setter def decoder(self, decoder: Decoder): self._tokenizer.decoder = decoder
0
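BaseTokenizer is normally reached through one of the concrete implementations, but it can also wrap a hand-built Tokenizer directly, which makes the padding and truncation helpers above easy to demonstrate; the WordLevel vocabulary below is made up for the example.

# Sketch: wrapping a bare Tokenizer in BaseTokenizer to use its convenience surface (toy vocab).
from tokenizers import Tokenizer
from tokenizers.implementations import BaseTokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[UNK]": 0, "[PAD]": 1, "my": 2, "name": 3, "is": 4, "john": 5}
raw = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
raw.pre_tokenizer = Whitespace()

tokenizer = BaseTokenizer(raw, {"model": "WordLevel"})
tokenizer.enable_truncation(max_length=4)
tokenizer.enable_padding(pad_token="[PAD]", pad_id=vocab["[PAD]"])

# Shorter sequences are padded up to the longest one in the batch.
for encoding in tokenizer.encode_batch(["my name is john", "my name"]):
    print(encoding.tokens, encoding.attention_mask)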
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from tokenizers.models import BPE from tokenizers.normalizers import NFKC from .base_tokenizer import BaseTokenizer class SentencePieceBPETokenizer(BaseTokenizer): """SentencePiece BPE Tokenizer Represents the BPE algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", replacement: str = "▁", add_prefix_space: bool = True, dropout: Optional[float] = None, fuse_unk: Optional[bool] = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) else: tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = NFKC() tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceBPE", "unk_token": unk_token, "replacement": replacement, "add_prefix_space": add_prefix_space, "dropout": dropout, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return SentencePieceBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
0
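A short sketch of the SentencePieceBPETokenizer defined above, trained on a toy two-sentence corpus; the vocabulary size is arbitrary and the resulting pieces are only illustrative.

# Sketch: SentencePiece-style BPE with Metaspace pre-tokenization (toy corpus).
from tokenizers.implementations import SentencePieceBPETokenizer

tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(
    ["my name is john", "my name is jane"],
    vocab_size=100,
    show_progress=False,
)

encoding = tokenizer.encode("my name is anna")
print(encoding.tokens)                # pieces carry the ▁ (U+2581) word-boundary marker
print(tokenizer.decode(encoding.ids))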
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py
from typing import Dict, Iterator, List, Optional, Union from tokenizers import AddedToken, Tokenizer, decoders, trainers from tokenizers.models import WordPiece from tokenizers.normalizers import BertNormalizer from tokenizers.pre_tokenizers import BertPreTokenizer from tokenizers.processors import BertProcessing from .base_tokenizer import BaseTokenizer class BertWordPieceTokenizer(BaseTokenizer): """Bert WordPiece Tokenizer""" def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, unk_token: Union[str, AddedToken] = "[UNK]", sep_token: Union[str, AddedToken] = "[SEP]", cls_token: Union[str, AddedToken] = "[CLS]", pad_token: Union[str, AddedToken] = "[PAD]", mask_token: Union[str, AddedToken] = "[MASK]", clean_text: bool = True, handle_chinese_chars: bool = True, strip_accents: Optional[bool] = None, lowercase: bool = True, wordpieces_prefix: str = "##", ): if vocab is not None: tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token))) else: tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token))) # Let the tokenizer know about special tokens if they are part of the vocab if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) if tokenizer.token_to_id(str(sep_token)) is not None: tokenizer.add_special_tokens([str(sep_token)]) if tokenizer.token_to_id(str(cls_token)) is not None: tokenizer.add_special_tokens([str(cls_token)]) if tokenizer.token_to_id(str(pad_token)) is not None: tokenizer.add_special_tokens([str(pad_token)]) if tokenizer.token_to_id(str(mask_token)) is not None: tokenizer.add_special_tokens([str(mask_token)]) tokenizer.normalizer = BertNormalizer( clean_text=clean_text, handle_chinese_chars=handle_chinese_chars, strip_accents=strip_accents, lowercase=lowercase, ) tokenizer.pre_tokenizer = BertPreTokenizer() if vocab is not None: sep_token_id = tokenizer.token_to_id(str(sep_token)) if sep_token_id is None: raise TypeError("sep_token not found in the vocabulary") cls_token_id = tokenizer.token_to_id(str(cls_token)) if cls_token_id is None: raise TypeError("cls_token not found in the vocabulary") tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id)) tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix) parameters = { "model": "BertWordPiece", "unk_token": unk_token, "sep_token": sep_token, "cls_token": cls_token, "pad_token": pad_token, "mask_token": mask_token, "clean_text": clean_text, "handle_chinese_chars": handle_chinese_chars, "strip_accents": strip_accents, "lowercase": lowercase, "wordpieces_prefix": wordpieces_prefix, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab: str, **kwargs): vocab = WordPiece.read_file(vocab) return BertWordPieceTokenizer(vocab, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, limit_alphabet: int = 1000, initial_alphabet: List[str] = [], special_tokens: List[Union[str, AddedToken]] = [ "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", ], show_progress: bool = True, wordpieces_prefix: str = "##", ): """Train the model using the given files""" trainer = trainers.WordPieceTrainer( vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: 
Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, limit_alphabet: int = 1000, initial_alphabet: List[str] = [], special_tokens: List[Union[str, AddedToken]] = [ "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", ], show_progress: bool = True, wordpieces_prefix: str = "##", length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.WordPieceTrainer( vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
0
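The BertWordPieceTokenizer above wires the normalizer, pre-tokenizer, post-processor and decoder together; the sketch below feeds it a hand-written vocabulary (made up for the example) so the WordPiece splitting and the [CLS]/[SEP] post-processing are visible without any training.

# Sketch: Bert-style WordPiece with a tiny in-memory vocabulary (toy vocab).
from tokenizers.implementations import BertWordPieceTokenizer

vocab = {
    "[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3, "[MASK]": 4,
    "my": 5, "name": 6, "is": 7, "john": 8, "##ny": 9,
}
tokenizer = BertWordPieceTokenizer(vocab, lowercase=True)

encoding = tokenizer.encode("My name is Johnny")
print(encoding.tokens)  # expected: ['[CLS]', 'my', 'name', 'is', 'john', '##ny', '[SEP]']
print(tokenizer.decode(encoding.ids))  # special tokens are dropped on decode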
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers from tokenizers.models import BPE from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer class ByteLevelBPETokenizer(BaseTokenizer): """ByteLevelBPETokenizer Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, add_prefix_space: bool = False, lowercase: bool = False, dropout: Optional[float] = None, unicode_normalizer: Optional[str] = None, continuing_subword_prefix: Optional[str] = None, end_of_word_suffix: Optional[str] = None, trim_offsets: bool = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer( BPE( vocab, merges, dropout=dropout, continuing_subword_prefix=continuing_subword_prefix or "", end_of_word_suffix=end_of_word_suffix or "", ) ) else: tokenizer = Tokenizer(BPE()) # Check for Unicode normalization first (before everything else) normalizers = [] if unicode_normalizer: normalizers += [unicode_normalizer_from_str(unicode_normalizer)] if lowercase: normalizers += [Lowercase()] # Create the normalizer structure if len(normalizers) > 0: if len(normalizers) > 1: tokenizer.normalizer = Sequence(normalizers) else: tokenizer.normalizer = normalizers[0] tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets) parameters = { "model": "ByteLevelBPE", "add_prefix_space": add_prefix_space, "lowercase": lowercase, "dropout": dropout, "unicode_normalizer": unicode_normalizer, "continuing_subword_prefix": continuing_subword_prefix, "end_of_word_suffix": end_of_word_suffix, "trim_offsets": trim_offsets, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return ByteLevelBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, show_progress: bool = True, special_tokens: List[Union[str, AddedToken]] = [], ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, show_progress: bool = True, special_tokens: List[Union[str, AddedToken]] = [], length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
0
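A minimal sketch of the ByteLevelBPETokenizer above, trained on a toy corpus; vocab_size and the sentences are placeholder values, chosen only so the 256-symbol byte-level alphabet plus a few merges fit.

# Sketch: GPT-2 style byte-level BPE (toy corpus, illustrative vocab size).
from tokenizers.implementations import ByteLevelBPETokenizer

tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train_from_iterator(
    ["my name is john", "my name is jane"],
    vocab_size=300,
    show_progress=False,
)

encoding = tokenizer.encode("my name is anna")
print(encoding.tokens)                 # byte-level symbols; a leading space shows up as 'Ġ'
print(tokenizer.decode(encoding.ids))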
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from ..models import BPE from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer class CharBPETokenizer(BaseTokenizer): """Original BPE Tokenizer Represents the BPE algorithm, as introduced by Rico Sennrich (https://arxiv.org/abs/1508.07909) The defaults settings corresponds to OpenAI GPT BPE tokenizers and differs from the original Sennrich subword-nmt implementation by the following options that you can deactivate: - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by: * removing any control characters and replacing all whitespaces by the classic one. * handle chinese chars by putting spaces around them. * strip all accents. - spitting on punctuation in addition to whitespaces (deactivate it with `split_on_whitespace_only=True`) """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", suffix: str = "</w>", dropout: Optional[float] = None, lowercase: bool = False, unicode_normalizer: Optional[str] = None, bert_normalizer: bool = True, split_on_whitespace_only: bool = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer( BPE( vocab, merges, dropout=dropout, unk_token=str(unk_token), end_of_word_suffix=suffix, ) ) else: tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) # Check for Unicode normalization first (before everything else) normalizers = [] if unicode_normalizer: normalizers += [unicode_normalizer_from_str(unicode_normalizer)] if bert_normalizer: normalizers += [BertNormalizer(lowercase=False)] if lowercase: normalizers += [Lowercase()] # Create the normalizer structure if len(normalizers) > 0: if len(normalizers) > 1: tokenizer.normalizer = Sequence(normalizers) else: tokenizer.normalizer = normalizers[0] if split_on_whitespace_only: tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit() else: tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix=suffix) parameters = { "model": "BPE", "unk_token": unk_token, "suffix": suffix, "dropout": dropout, "lowercase": lowercase, "unicode_normalizer": unicode_normalizer, "bert_normalizer": bert_normalizer, "split_on_whitespace_only": split_on_whitespace_only, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return CharBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], suffix: Optional[str] = "</w>", show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def 
train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], suffix: Optional[str] = "</w>", show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
0
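To round out the implementations, here is a small sketch of the CharBPETokenizer above with its end-of-word suffix; the corpus is a toy one and the exact merges it learns are not meaningful.

# Sketch: subword-nmt style BPE with an end-of-word suffix (toy corpus).
from tokenizers.implementations import CharBPETokenizer

tokenizer = CharBPETokenizer(suffix="</w>", lowercase=True)
tokenizer.train_from_iterator(["my name is john", "my name is jane"], show_progress=False)

# Symbols that close a word carry the </w> suffix; the BPEDecoder maps it back to spaces.
encoding = tokenizer.encode("My name is Anna")
print(encoding.tokens)
print(tokenizer.decode(encoding.ids))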
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py
from .visualizer import Annotation, EncodingVisualizer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css
.tokenized-text { width:100%; padding:2rem; max-height: 400px; overflow-y: auto; box-sizing:border-box; line-height:4rem; /* Lots of space between lines */ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; box-shadow: 2px 2px 2px rgba(0,0,0,0.2); background-color: rgba(0,0,0,0.01); letter-spacing:2px; /* Give some extra separation between chars */ } .non-token{ /* White space and other things the tokenizer ignores*/ white-space: pre; letter-spacing:4px; border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/ border-bottom:1px solid #A0A0A0; line-height: 1rem; height: calc(100% - 2px); } .token { white-space: pre; position:relative; color:black; letter-spacing:2px; } .annotation{ white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */ border-radius:4px; position:relative; width:fit-content; } .annotation:before { /*The before holds the text and the after holds the background*/ z-index:1000; /* Make sure this is above the background */ content:attr(data-label); /* The annotations label is on a data attribute */ color:white; position:absolute; font-size:1rem; text-align:center; font-weight:bold; top:1.75rem; line-height:0; left:0; width:100%; padding:0.5rem 0; /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ overflow: hidden; white-space: nowrap; text-overflow:ellipsis; } .annotation:after { content:attr(data-label); /* The content defines the width of the annotation*/ position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; /* Nast hack below: We set the annotations color in code because we don't know the colors at css time. But you can't pass a color as a data attribute to get it into the pseudo element (this thing) So to get around that, annotations have the color set on them with a style attribute and then we can get the color with currentColor. 
Annotations wrap tokens and tokens set the color back to black */ background-color: currentColor; } .annotation:hover::after, .annotation:hover::before{ /* When the user hovers over an annotation expand the label to display in full */ min-width: fit-content; } .annotation:hover{ /* Emphasize the annotation start end with a border on hover*/ border-color: currentColor; border: 2px solid; } .special-token:not(:empty){ /* A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) */ position:relative; } .special-token:empty::before{ /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ content:attr(data-stok); background:#202020; font-size:0.75rem; color:white; margin: 0 0.25rem; padding: 0.25rem; border-radius:4px } .special-token:not(:empty):before { /* Special tokens that have text (UNK) are displayed above the actual text*/ content:attr(data-stok); position:absolute; bottom:1.75rem; min-width:100%; width:100%; height:1rem; line-height:1rem; font-size:1rem; text-align:center; color:white; font-weight:bold; background:#202020; border-radius:10%; } /* We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations instead we apply even and odd class at generation time and color them that way */ .even-token{ background:#DCDCDC ; border: 1px solid #DCDCDC; } .odd-token{ background:#A0A0A0; border: 1px solid #A0A0A0; } .even-token.multi-token,.odd-token.multi-token{ background: repeating-linear-gradient( 45deg, transparent, transparent 1px, #ccc 1px, #ccc 1px ), /* on "bottom" */ linear-gradient( to bottom, #FFB6C1, #999 ); } .multi-token:hover::after { content:"This char has more than 1 token"; /* The content defines the width of the annotation*/ color:white; background-color: black; position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; }
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py
import itertools import os import re from string import Template from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple from tokenizers import Encoding, Tokenizer dirname = os.path.dirname(__file__) css_filename = os.path.join(dirname, "visualizer-styles.css") with open(css_filename) as f: css = f.read() class Annotation: start: int end: int label: int def __init__(self, start: int, end: int, label: str): self.start = start self.end = end self.label = label AnnotationList = List[Annotation] PartialIntList = List[Optional[int]] class CharStateKey(NamedTuple): token_ix: Optional[int] anno_ix: Optional[int] class CharState: char_ix: Optional[int] def __init__(self, char_ix): self.char_ix = char_ix self.anno_ix: Optional[int] = None self.tokens: List[int] = [] @property def token_ix(self): return self.tokens[0] if len(self.tokens) > 0 else None @property def is_multitoken(self): """ BPE tokenizers can output more than one token for a char """ return len(self.tokens) > 1 def partition_key(self) -> CharStateKey: return CharStateKey( token_ix=self.token_ix, anno_ix=self.anno_ix, ) class Aligned: pass class EncodingVisualizer: """ Build an EncodingVisualizer Args: tokenizer (:class:`~tokenizers.Tokenizer`): A tokenizer instance default_to_notebook (:obj:`bool`): Whether to render html output in a notebook by default annotation_converter (:obj:`Callable`, `optional`): An optional (lambda) function that takes an annotation in any format and returns an Annotation object """ unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE) def __init__( self, tokenizer: Tokenizer, default_to_notebook: bool = True, annotation_converter: Optional[Callable[[Any], Annotation]] = None, ): if default_to_notebook: try: from IPython.core.display import HTML, display except ImportError as e: raise Exception( """We couldn't import IPython utils for html display. Are you running in a notebook? You can also pass `default_to_notebook=False` to get back raw HTML """ ) self.tokenizer = tokenizer self.default_to_notebook = default_to_notebook self.annotation_coverter = annotation_converter pass def __call__( self, text: str, annotations: AnnotationList = [], default_to_notebook: Optional[bool] = None, ) -> Optional[str]: """ Build a visualization of the given text Args: text (:obj:`str`): The text to tokenize annotations (:obj:`List[Annotation]`, `optional`): An optional list of annotations of the text. The can either be an annotation class or anything else if you instantiated the visualizer with a converter function default_to_notebook (:obj:`bool`, `optional`, defaults to `False`): If True, will render the html in a notebook. Otherwise returns an html string. Returns: The HTML string if default_to_notebook is False, otherwise (default) returns None and renders the HTML in the notebook """ final_default_to_notebook = self.default_to_notebook if default_to_notebook is not None: final_default_to_notebook = default_to_notebook if final_default_to_notebook: try: from IPython.core.display import HTML, display except ImportError as e: raise Exception( """We couldn't import IPython utils for html display. 
Are you running in a notebook?""" ) if self.annotation_coverter is not None: annotations = list(map(self.annotation_coverter, annotations)) encoding = self.tokenizer.encode(text) html = EncodingVisualizer.__make_html(text, encoding, annotations) if final_default_to_notebook: display(HTML(html)) else: return html @staticmethod def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]: """ Generates a color palette for all the labels in a given set of annotations Args: annotations (:obj:`Annotation`): A list of annotations Returns: :obj:`dict`: A dictionary mapping labels to colors in HSL format """ if len(annotations) == 0: return {} labels = set(map(lambda x: x.label, annotations)) num_labels = len(labels) h_step = int(255 / num_labels) if h_step < 20: h_step = 20 s = 32 l = 64 h = 10 colors = {} for label in sorted(labels): # sort so we always get the same colors for a given set of labels colors[label] = f"hsl({h},{s}%,{l}%" h += h_step return colors @staticmethod def consecutive_chars_to_html( consecutive_chars_list: List[CharState], text: str, encoding: Encoding, ): """ Converts a list of "consecutive chars" into a single HTML element. Chars are consecutive if they fall under the same word, token and annotation. The CharState class is a named tuple with a "partition_key" method that makes it easy to compare if two chars are consecutive. Args: consecutive_chars_list (:obj:`List[CharState]`): A list of CharStates that have been grouped together text (:obj:`str`): The original text being processed encoding (:class:`~tokenizers.Encoding`): The encoding returned from the tokenizer Returns: :obj:`str`: The HTML span for a set of consecutive chars """ first = consecutive_chars_list[0] if first.char_ix is None: # its a special token stoken = encoding.tokens[first.token_ix] # special tokens are represented as empty spans. We use the data attribute and css # magic to display it return f'<span class="special-token" data-stoken={stoken}></span>' # We're not in a special token so this group has a start and end. last = consecutive_chars_list[-1] start = first.char_ix end = last.char_ix + 1 span_text = text[start:end] css_classes = [] # What css classes will we apply on the resulting span data_items = {} # What data attributes will we apply on the result span if first.token_ix is not None: # We can either be in a token or not (e.g. in white space) css_classes.append("token") if first.is_multitoken: css_classes.append("multi-token") if first.token_ix % 2: # We use this to color alternating tokens. # A token might be split by an annotation that ends in the middle of it, so this # lets us visually indicate a consecutive token despite its possible splitting in # the html markup css_classes.append("odd-token") else: # Like above, but a different color so we can see the tokens alternate css_classes.append("even-token") if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None: # This is a special token that is in the text. probably UNK css_classes.append("special-token") # TODO is this the right name for the data attribute ? data_items["stok"] = encoding.tokens[first.token_ix] else: # In this case we are looking at a group/single char that is not tokenized. # e.g. 
white space css_classes.append("non-token") css = f'''class="{' '.join(css_classes)}"''' data = "" for key, val in data_items.items(): data += f' data-{key}="{val}"' return f"<span {css} {data} >{span_text}</span>" @staticmethod def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str: char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations) current_consecutive_chars = [char_states[0]] prev_anno_ix = char_states[0].anno_ix spans = [] label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations) cur_anno_ix = char_states[0].anno_ix if cur_anno_ix is not None: # If we started in an annotation make a span for it anno = annotations[cur_anno_ix] label = anno.label color = label_colors_dict[label] spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') for cs in char_states[1:]: cur_anno_ix = cs.anno_ix if cur_anno_ix != prev_anno_ix: # If we've transitioned in or out of an annotation spans.append( # Create a span from the current consecutive characters EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) current_consecutive_chars = [cs] if prev_anno_ix is not None: # if we transitioned out of an annotation close it's span spans.append("</span>") if cur_anno_ix is not None: # If we entered a new annotation make a span for it anno = annotations[cur_anno_ix] label = anno.label color = label_colors_dict[label] spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') prev_anno_ix = cur_anno_ix if cs.partition_key() == current_consecutive_chars[0].partition_key(): # If the current charchter is in the same "group" as the previous one current_consecutive_chars.append(cs) else: # Otherwise we make a span for the previous group spans.append( EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) # An reset the consecutive_char_list to form a new group current_consecutive_chars = [cs] # All that's left is to fill out the final span # TODO I think there is an edge case here where an annotation's span might not close spans.append( EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) res = HTMLBody(spans) # Send the list of spans to the body of our html return res @staticmethod def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList: """ Args: text (:obj:`str`): The raw text we want to align to annotations (:obj:`AnnotationList`): A (possibly empty) list of annotations Returns: A list of length len(text) whose entry at index i is None if there is no annotation on charachter i or k, the index of the annotation that covers index i where k is with respect to the list of annotations """ annotation_map = [None] * len(text) for anno_ix, a in enumerate(annotations): for i in range(a.start, a.end): annotation_map[i] = anno_ix return annotation_map @staticmethod def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]: """ For each character in the original text, we emit a tuple representing it's "state": * which token_ix it corresponds to * which word_ix it corresponds to * which annotation_ix it corresponds to Args: text (:obj:`str`): The raw text we want to align to annotations (:obj:`List[Annotation]`): A (possibly empty) list of annotations encoding: (:class:`~tokenizers.Encoding`): The encoding returned from the tokenizer Returns: :obj:`List[CharState]`: A list of 
CharStates, indicating for each char in the text what its state is """ annotation_map = EncodingVisualizer.__make_anno_map(text, annotations) # Todo make this a dataclass or named tuple char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))] for token_ix, token in enumerate(encoding.tokens): offsets = encoding.token_to_chars(token_ix) if offsets is not None: start, end = offsets for i in range(start, end): char_states[i].tokens.append(token_ix) for char_ix, anno_ix in enumerate(annotation_map): char_states[char_ix].anno_ix = anno_ix return char_states def HTMLBody(children: List[str], css_styles=css) -> str: """ Generates the full html with css from a list of html spans Args: children (:obj:`List[str]`): A list of strings, assumed to be html elements css_styles (:obj:`str`, `optional`): Optional alternative implementation of the css Returns: :obj:`str`: An HTML string with style markup """ children_text = "".join(children) return f""" <html> <head> <style> {css_styles} </style> </head> <body> <div class="tokenized-text" dir=auto> {children_text} </div> </body> </html> """
0
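A minimal usage sketch for the visualizer assembled above, assuming it is exposed as tokenizers.tools.EncodingVisualizer together with Annotation, and assuming a local tokenizer.json file; the file path, annotation offsets, and label are illustrative, not part of the library.

from tokenizers import Tokenizer
from tokenizers.tools import EncodingVisualizer, Annotation

tokenizer = Tokenizer.from_file("tokenizer.json")  # hypothetical local file
visualizer = EncodingVisualizer(tokenizer, default_to_notebook=False)

text = "My name is John"
annotations = [Annotation(start=11, end=15, label="PERSON")]  # illustrative span over "John"

# Outside a notebook the call returns the raw HTML string built by __make_html.
html = visualizer(text, annotations=annotations)
print(html[:200])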
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py
from .. import decoders Decoder = decoders.Decoder ByteLevel = decoders.ByteLevel Replace = decoders.Replace WordPiece = decoders.WordPiece ByteFallback = decoders.ByteFallback Fuse = decoders.Fuse Strip = decoders.Strip Metaspace = decoders.Metaspace BPEDecoder = decoders.BPEDecoder CTC = decoders.CTC Sequence = decoders.Sequence
0
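A short sketch, using the re-exports above, of chaining decoders with Sequence; the token strings are made up for illustration.

from tokenizers.decoders import ByteFallback, Fuse, Sequence

# ByteFallback turns tokens such as <0x61> back into raw bytes, then Fuse
# concatenates every remaining piece into a single string.
decoder = Sequence([ByteFallback(), Fuse()])
print(decoder.decode(["My", " na", "me", " is ", "<0x61>"]))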
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi
# Generated content DO NOT EDIT class Decoder: """ Base class for all decoders This class is not supposed to be instantiated directly. Instead, any implementation of a Decoder will return an instance of this class when instantiated. """ def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class BPEDecoder(Decoder): """ BPEDecoder Decoder Args: suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`): The suffix that was used to caracterize an end-of-word. This suffix will be replaced by whitespaces during the decoding """ def __init__(self, suffix="</w>"): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class ByteFallback(Decoder): """ ByteFallback Decoder ByteFallback is a simple trick which converts tokens looking like `<0x61>` to pure bytes, and attempts to make them into a string. If the tokens cannot be decoded you will get � instead for each inconvertable byte token """ def __init__(self): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class ByteLevel(Decoder): """ ByteLevel Decoder This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` :class:`~tokenizers.pre_tokenizers.PreTokenizer`. """ def __init__(self): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class CTC(Decoder): """ CTC Decoder Args: pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`): The pad token used by CTC to delimit a new token. word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): The word delimiter token. It will be replaced by a <space> cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, and some abbreviated english forms. """ def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Fuse(Decoder): """ Fuse Decoder Fuse simply fuses every token into a single string. This is the last step of decoding, this decoder exists only if there is need to add other decoders *after* the fusion """ def __init__(self): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Metaspace(Decoder): """ Metaspace Decoder Args: replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): The replacement character. Must be exactly one character. By default we use the `▁` (U+2581) meta symbol (Same as in SentencePiece). add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. 
""" def __init__(self, replacement="▁", add_prefix_space=True): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Replace(Decoder): """ Replace Decoder This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` :class:`~tokenizers.pre_tokenizers.PreTokenizer`. """ def __init__(self, pattern, content): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Sequence(Decoder): """ Sequence Decoder Args: decoders (:obj:`List[Decoder]`) The decoders that need to be chained """ def __init__(self, decoders): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Strip(Decoder): """ Strip normalizer Strips n left characters of each token, or n right characters of each token """ def __init__(self, content, left=0, right=0): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class WordPiece(Decoder): """ WordPiece Decoder Args: prefix (:obj:`str`, `optional`, defaults to :obj:`##`): The prefix to use for subwords that are not a beginning-of-word cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, and some abbreviated english forms. """ def __init__(self, prefix="##", cleanup=True): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass
0
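A small example of the Metaspace decoder documented above; the expected output noted in the comment is indicative only.

from tokenizers.decoders import Metaspace

decoder = Metaspace(replacement="▁", add_prefix_space=True)
# "▁" marks word boundaries, so this is expected to print "My name is John".
print(decoder.decode(["▁My", "▁name", "▁is", "▁John"]))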
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py
# Generated content DO NOT EDIT from .. import processors PostProcessor = processors.PostProcessor BertProcessing = processors.BertProcessing ByteLevel = processors.ByteLevel RobertaProcessing = processors.RobertaProcessing Sequence = processors.Sequence TemplateProcessing = processors.TemplateProcessing
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi
# Generated content DO NOT EDIT class PostProcessor: """ Base class for all post-processors This class is not supposed to be instantiated directly. Instead, any implementation of a PostProcessor will return an instance of this class when instantiated. """ def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class BertProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Bert model: - a SEP token - a CLS token Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id """ def __init__(self, sep, cls): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class ByteLevel(PostProcessor): """ This post-processor takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't want the offsets to include these whitespaces, then this PostProcessor must be used. Args: trim_offsets (:obj:`bool`): Whether to trim the whitespaces from the produced offsets. """ def __init__(self, trim_offsets=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class RobertaProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Roberta model: - a SEP token - a CLS token It also takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. 
If you don't want the offsets to include these whitespaces, then this PostProcessor should be initialized with :obj:`trim_offsets=True` Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to trim the whitespaces from the produced offsets. add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether the add_prefix_space option was enabled during pre-tokenization. This is relevant because it defines the way the offsets are trimmed out. """ def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class Sequence(PostProcessor): """ Sequence Processor Args: processors (:obj:`List[PostProcessor]`) The processors that need to be chained """ def __init__(self, processors): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class TemplateProcessing(PostProcessor): """ Provides a way to specify templates in order to add the special tokens to each input sequence as relevant. Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair sequences. The final result looks like this: - Single sequence: :obj:`[CLS] Hello there [SEP]` - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` With the type ids as following:: [CLS] ... [SEP] ... [SEP] 0 0 0 1 1 You can achieve such behavior using a TemplateProcessing:: TemplateProcessing( single="[CLS] $0 [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) In this example, each input sequence is identified using a ``$`` construct. This identifier lets us specify each input sequence, and the type_id to use. When nothing is specified, it uses the default values. 
Here are the different ways to specify it: - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... - Specifying both: ``$A:0``, ``$B:1``, ... The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. **Warning**: You must ensure that you are giving the correct tokens/ids as these will be added to the Encoding without any further check. If the given ids correspond to something totally different in a `Tokenizer` using this `PostProcessor`, it might lead to unexpected results. Args: single (:obj:`Template`): The template used for single sequences pair (:obj:`Template`): The template used when both sequences are specified special_tokens (:obj:`Tokens`): The list of special tokens used in each sequences Types: Template (:obj:`str` or :obj:`List`): - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens - If a :obj:`List[str]` is provided, a list of tokens Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): - A :obj:`Tuple` with both a token and its associated ID, in any order - A :obj:`dict` with the following keys: - "id": :obj:`str` => The special token id, as specified in the Template - "ids": :obj:`List[int]` => The associated IDs - "tokens": :obj:`List[str]` => The associated tokens The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have the same length. """ def __init__(self, single, pair, special_tokens): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass
0
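The BERT-style template from the TemplateProcessing docstring above, written out as a runnable snippet; the special-token ids are placeholders and must match the vocabulary of the tokenizer the processor is attached to.

from tokenizers.processors import TemplateProcessing

post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # ids are placeholders
)
# [CLS] plus two [SEP] tokens are added for a pair of sequences.
print(post_processor.num_special_tokens_to_add(is_pair=True))  # 3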
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.py
# Generated content DO NOT EDIT from .. import trainers Trainer = trainers.Trainer BpeTrainer = trainers.BpeTrainer UnigramTrainer = trainers.UnigramTrainer WordLevelTrainer = trainers.WordLevelTrainer WordPieceTrainer = trainers.WordPieceTrainer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi
# Generated content DO NOT EDIT class Trainer: """ Base class for all trainers This class is not supposed to be instantiated directly. Instead, any implementation of a Trainer will return an instance of this class when instantiated. """ class BpeTrainer(Trainer): """ Trainer capable of training a BPE model Args: vocab_size (:obj:`int`, `optional`): The size of the final vocabulary, including all tokens and alphabet. min_frequency (:obj:`int`, `optional`): The minimum frequency a pair should have in order to be merged. show_progress (:obj:`bool`, `optional`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. limit_alphabet (:obj:`int`, `optional`): The maximum different characters to keep in the alphabet. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. continuing_subword_prefix (:obj:`str`, `optional`): A prefix to be used for every subword that is not a beginning-of-word. end_of_word_suffix (:obj:`str`, `optional`): A suffix to be used for every subword that is an end-of-word. max_token_length (:obj:`int`, `optional`): Prevents creating tokens longer than the specified size. This can help reduce the pollution of your vocabulary with highly repetitive tokens like `======` from Wikipedia """ class UnigramTrainer(Trainer): """ Trainer capable of training a Unigram model Args: vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. shrinking_factor (:obj:`float`): The shrinking factor used at each step of the training to prune the vocabulary. unk_token (:obj:`str`): The token used for out-of-vocabulary tokens. max_piece_length (:obj:`int`): The maximum length of a given token. n_sub_iterations (:obj:`int`): The number of iterations of the EM algorithm to perform before pruning the vocabulary. """ def __init__( self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2, ): pass class WordLevelTrainer(Trainer): """ Trainer capable of training a WordLevel model Args: vocab_size (:obj:`int`, `optional`): The size of the final vocabulary, including all tokens and alphabet. min_frequency (:obj:`int`, `optional`): The minimum frequency a pair should have in order to be merged. show_progress (:obj:`bool`, `optional`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`): A list of special tokens the model should know of. """ class WordPieceTrainer(Trainer): """ Trainer capable of training a WordPiece model Args: vocab_size (:obj:`int`, `optional`): The size of the final vocabulary, including all tokens and alphabet. min_frequency (:obj:`int`, `optional`): The minimum frequency a pair should have in order to be merged. show_progress (:obj:`bool`, `optional`): Whether to show progress bars while training. 
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. limit_alphabet (:obj:`int`, `optional`): The maximum different characters to keep in the alphabet. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. continuing_subword_prefix (:obj:`str`, `optional`): A prefix to be used for every subword that is not a beginning-of-word. end_of_word_suffix (:obj:`str`, `optional`): A suffix to be used for every subword that is an end-of-word. """ def __init__( self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet=[], continuing_subword_prefix="##", end_of_word_suffix=None, ): pass
0
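A minimal training sketch using the trainers documented above, assuming a local text file named corpus.txt; the file name and hyperparameters are illustrative.

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

trainer = BpeTrainer(vocab_size=8000, min_frequency=2, special_tokens=["[UNK]", "[PAD]"])
tokenizer.train(["corpus.txt"], trainer=trainer)  # hypothetical training file
print(tokenizer.get_vocab_size())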
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py
# Generated content DO NOT EDIT from .. import pre_tokenizers PreTokenizer = pre_tokenizers.PreTokenizer BertPreTokenizer = pre_tokenizers.BertPreTokenizer ByteLevel = pre_tokenizers.ByteLevel CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit Digits = pre_tokenizers.Digits Metaspace = pre_tokenizers.Metaspace Punctuation = pre_tokenizers.Punctuation Sequence = pre_tokenizers.Sequence Split = pre_tokenizers.Split UnicodeScripts = pre_tokenizers.UnicodeScripts Whitespace = pre_tokenizers.Whitespace WhitespaceSplit = pre_tokenizers.WhitespaceSplit
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi
# Generated content DO NOT EDIT class PreTokenizer: """ Base class for all pre-tokenizers This class is not supposed to be instantiated directly. Instead, any implementation of a PreTokenizer will return an instance of this class when instantiated. """ def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class BertPreTokenizer(PreTokenizer): """ BertPreTokenizer This pre-tokenizer splits tokens on spaces, and also on punctuation. Each occurence of a punctuation character will be treated separately. """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class ByteLevel(PreTokenizer): """ ByteLevel PreTokenizer This pre-tokenizer takes care of replacing all bytes of the given string with a corresponding representation, as well as splitting into words. Args: add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): Set this to :obj:`False` to prevent this `pre_tokenizer` from using the GPT2 specific regexp for spliting on whitespace. 
""" def __init__(self, add_prefix_space=True, use_regex=True): pass @staticmethod def alphabet(): """ Returns the alphabet used by this PreTokenizer. Since the ByteLevel works as its name suggests, at the byte level, it encodes each byte value to a unique visible character. This means that there is a total of 256 different characters composing this alphabet. Returns: :obj:`List[str]`: A list of characters that compose the alphabet """ pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class CharDelimiterSplit(PreTokenizer): """ This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` Args: delimiter: str: The delimiter char that will be used to split input """ def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Digits(PreTokenizer): """ This pre-tokenizer simply splits using the digits in separate tokens Args: individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): If set to True, digits will each be separated as follows:: "Call 123 please" -> "Call ", "1", "2", "3", " please" If set to False, digits will grouped as follows:: "Call 123 please" -> "Call ", "123", " please" """ def __init__(self, individual_digits=False): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Metaspace(PreTokenizer): """ Metaspace pre-tokenizer This pre-tokenizer replaces any whitespace by the provided replacement character. It then tries to split on these spaces. Args: replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): The replacement character. Must be exactly one character. By default we use the `▁` (U+2581) meta symbol (Same as in SentencePiece). add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. """ def __init__(self, replacement="_", add_prefix_space=True): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Punctuation(PreTokenizer): """ This pre-tokenizer simply splits on punctuation as individual characters. Args: behavior (:class:`~tokenizers.SplitDelimiterBehavior`): The behavior to use when splitting. Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", "contiguous" """ def __init__(self, behavior="isolated"): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Sequence(PreTokenizer): """ This pre-tokenizer composes other pre_tokenizers and applies them in sequence """ def __init__(self, pretokenizers): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Split(PreTokenizer): """ Split PreTokenizer This versatile pre-tokenizer splits using the provided pattern and according to the provided behavior. The pattern can be inverted by making use of the invert flag. Args: pattern (:obj:`str` or :class:`~tokenizers.Regex`): A pattern used to split the string. 
Usually a string or a a regex built with `tokenizers.Regex` behavior (:class:`~tokenizers.SplitDelimiterBehavior`): The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" invert (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to invert the pattern. """ def __init__(self, pattern, behavior, invert=False): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class UnicodeScripts(PreTokenizer): """ This pre-tokenizer splits on characters that belong to different language family It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. This mimicks SentencePiece Unigram implementation. """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Whitespace(PreTokenizer): """ This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class WhitespaceSplit(PreTokenizer): """ This pre-tokenizer simply splits on the whitespace. Works like `.split()` """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass
0
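The Digits example from the docstring above, combined with Whitespace through a Sequence pre-tokenizer; the offsets shown in the comment are indicative.

from tokenizers.pre_tokenizers import Digits, Sequence, Whitespace

pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=True)])
# Roughly: [('Call', (0, 4)), ('1', (5, 6)), ('2', (6, 7)), ('3', (7, 8)), ('please', (9, 15))]
print(pre_tokenizer.pre_tokenize_str("Call 123 please"))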
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/spm_parity_check.py
import tokenizers from argparse import ArgumentParser import sentencepiece as spm from collections import Counter import json import os import datetime try: from termcolor import colored has_color = True except Exception: has_color = False def main(): parser = ArgumentParser("SentencePiece parity checker") parser.add_argument( "--input-file", "-i", type=str, required=True, help="Which files do you want to train from", ) parser.add_argument( "--model-file", "-m", type=str, required=False, default=None, help="Use a pretrained token file", ) parser.add_argument( "--model-prefix", type=str, default="spm_parity", help="Model prefix for spm_train", ) parser.add_argument( "--vocab-size", "-v", type=int, default=8000, help="Vocab size for spm_train", ) parser.add_argument( "--verbose", action="store_true", help="Verbosity", ) parser.add_argument( "--train", action="store_true", help="Instead of checking the encoder part, we check the trainer part", ) parser.add_argument( "--from-spm", action="store_true", help="Directly load the spm file with it's own normalizer", ) args = parser.parse_args() trained = False if args.model_file is None: spm.SentencePieceTrainer.Train( f"--input={args.input_file} --model_prefix={args.model_prefix}" f" --character_coverage=1.0" f" --max_sentence_length=40000" f" --num_threads=1" f" --vocab_size={args.vocab_size}" ) trained = True args.model_file = f"{args.model_prefix}.model" try: if args.train: check_train(args) else: check_encode(args) finally: if trained: os.remove(f"{args.model_prefix}.model") os.remove(f"{args.model_prefix}.vocab") def check_train(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) tokenizer = tokenizers.SentencePieceUnigramTokenizer() tokenizer.train(args.input_file, show_progress=False) spm_tokens = 0 tokenizer_tokens = 0 with open(args.input_file, "r") as f: for i, line in enumerate(f): line = line.strip() ids = sp.EncodeAsIds(line) encoded = tokenizer.encode(line) spm_tokens += len(ids) tokenizer_tokens += len(encoded.ids) vocab = [0 for i in range(args.vocab_size)] spm_vocab = [0 for i in range(args.vocab_size)] for token, index in tokenizer.get_vocab().items(): vocab[index] = token for i in range(args.vocab_size): spm_vocab[i] = sp.id_to_piece(i) # 0 is unk in tokenizers, 0, 1, 2 are unk bos, eos in spm by default. for i, (token, spm_token) in enumerate(zip(vocab[1:], spm_vocab[3:])): if token != spm_token: print(f"First different token is token {i} ({token} != {spm_token})") break print(f"Tokenizer used {tokenizer_tokens}, where spm used {spm_tokens}") assert ( tokenizer_tokens < spm_tokens ), "Our trainer should be at least more efficient than the SPM one" print("Ok our trainer is at least more efficient than the SPM one") def check_diff(spm_diff, tok_diff, sp, tok): if spm_diff == list(reversed(tok_diff)): # AAA -> AA+A vs A+AA case. return True elif len(spm_diff) == len(tok_diff) and tok.decode(spm_diff) == tok.decode( tok_diff ): # Second order OK # Barrich -> Barr + ich vs Bar + rich return True spm_reencoded = sp.encode(sp.decode(spm_diff)) tok_reencoded = tok.encode(tok.decode(spm_diff)).ids if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded: # Type 3 error. # Snehagatha -> # Sne, h, aga, th, a # Sne, ha, gat, ha # Encoding the wrong with sp does not even recover what spm gave us # It fits tokenizer however... 
return True return False def check_details(line, spm_ids, tok_ids, sp, tok): # Encoding can be the same with same result AAA -> A + AA vs AA + A # We can check that we use at least exactly the same number of tokens. for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)): if spm_id != tok_id: break first = i for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))): if spm_id != tok_id: break last = len(spm_ids) - i spm_diff = spm_ids[first:last] tok_diff = tok_ids[first:last] if check_diff(spm_diff, tok_diff, sp, tok): return True if last - first > 5: # We might have twice a single problem, attempt to subdivide the disjointed tokens into smaller problems spms = Counter(spm_ids[first:last]) toks = Counter(tok_ids[first:last]) removable_tokens = { spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si } min_width = 3 for i in range(last - first - min_width): if all( spm_ids[first + i + j] in removable_tokens for j in range(min_width) ): possible_matches = [ k for k in range(last - first - min_width) if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width] ] for j in possible_matches: if check_diff( spm_ids[first : first + i], tok_ids[first : first + j], sp, tok ) and check_details( line, spm_ids[first + i : last], tok_ids[first + j : last], sp, tok, ): return True print(f"Spm: {[tok.decode([spm_ids[i]]) for i in range(first, last)]}") try: print(f"Tok: {[tok.decode([tok_ids[i]]) for i in range(first, last)]}") except Exception: pass ok_start = tok.decode(spm_ids[:first]) ok_end = tok.decode(spm_ids[last:]) wrong = tok.decode(spm_ids[first:last]) print() if has_color: print( f"{colored(ok_start, 'grey')}{colored(wrong, 'red')}{colored(ok_end, 'grey')}" ) else: print(wrong) return False def check_encode(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) if args.from_spm: tok = tokenizers.SentencePieceUnigramTokenizer.from_spm(args.model_file) else: vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())] unk_id = sp.unk_id() tok = tokenizers.SentencePieceUnigramTokenizer(vocab, unk_id) perfect = 0 imperfect = 0 wrong = 0 now = datetime.datetime.now spm_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(args.input_file, "r", encoding="utf-8-sig") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = sp.EncodeAsIds(line) spm_time = now() encoded = tok.encode(line) tok_time = now() spm_total_time += spm_time - start tok_total_time += tok_time - spm_time if args.verbose: if i % 10000 == 0: print( f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})" ) print(f"SPM: {spm_total_time} - TOK: {tok_total_time}") if ids != encoded.ids: if check_details(line, ids, encoded.ids, sp, tok): imperfect += 1 continue else: wrong += 1 else: perfect += 1 assert ids == encoded.ids, f"line {i}: {line} : \n\n{ids}\n{encoded.ids}\n{list(zip(encoded.ids, encoded.tokens))}" print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") total = perfect + imperfect + wrong print( f"Accuracy {perfect * 100 / total:.2f} Slowdown : {tok_total_time/ spm_total_time:.2f}" ) if __name__ == "__main__": main()
0
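A stripped-down sketch of the diff-window idea used by check_details above: trim the common prefix and suffix of the two id sequences so only the disagreeing middle needs to be inspected. The example ids are made up.

def diff_window(spm_ids, tok_ids):
    # Walk from the front while the ids agree.
    first = 0
    while first < min(len(spm_ids), len(tok_ids)) and spm_ids[first] == tok_ids[first]:
        first += 1
    # Walk from the back while the ids agree, without crossing `first`.
    last = 0
    while last < min(len(spm_ids), len(tok_ids)) - first and spm_ids[-1 - last] == tok_ids[-1 - last]:
        last += 1
    return spm_ids[first:len(spm_ids) - last], tok_ids[first:len(tok_ids) - last]

print(diff_window([5, 8, 9, 2], [5, 9, 8, 2]))  # ([8, 9], [9, 8])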
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/convert.py
import transformers from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer from tokenizers.processors import TemplateProcessing from tokenizers.models import Unigram, BPE from tokenizers import decoders from tokenizers import Tokenizer, Regex from tokenizers.normalizers import ( StripAccents, NFKD, Lowercase, Sequence, BertNormalizer, Precompiled, Replace, ) from tokenizers.pre_tokenizers import ( Digits, WhitespaceSplit, Metaspace, Sequence as PSequence, ) import json import unicodedata import sys import os import datetime import argparse sys.path.append(".") from spm_parity_check import check_details from sentencepiece_extractor import SentencePieceExtractor def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() def get_proto(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) return m class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class SpmConverter(Converter): def __init__(self, *args): super().__init__(*args) self.proto = get_proto(self.original_tokenizer.vocab_file) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab = self.vocab(proto) unk_id = self.unk_id(proto) if model_type == 1: tokenizer = Tokenizer(Unigram(vocab, unk_id)) elif model_type == 2: vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() tokenizer = Tokenizer( BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True) ) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")]) def post_processor(self, tokenizer): return None def converted(self): tokenizer = self.tokenizer(self.proto) # Tokenizer assemble tokenizer.normalizer = self.normalizer(self.proto) replacement = "▁" add_prefix_space = True tokenizer.pre_tokenizer = Metaspace( replacement=replacement, add_prefix_space=add_prefix_space ) tokenizer.decoder = decoders.Metaspace( replacement=replacement, add_prefix_space=add_prefix_space ) post_processor = self.post_processor(tokenizer) if post_processor: tokenizer.post_processor = post_processor # TODO what parameters should we give ? 
parameters = {} return BaseTokenizer(tokenizer, parameters) class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["[CLS]", "$0", "[SEP]"], seq_b=["$1", "[SEP]"], special_tokens=[ ("[CLS]", tokenizer.get_vocab()["[CLS]"]), ("[SEP]", tokenizer.get_vocab()["[SEP]"]), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] return vocab def unk_id(self, proto): return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>", "en_XX"], seq_b=["$1", "</s>"], special_tokens=[ ("en_XX", tokenizer.get_vocab()["en_XX"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( 
seq_a=["$0", "<sep>", "<cls>"], seq_b=["$1", "<sep>"], special_tokens=[ ("<sep>", tokenizer.get_vocab()["<sep>"]), ("<cls>", tokenizer.get_vocab()["<cls>"]), ], ) class ReformerConverter(SpmConverter): pass class PegasusConverter(SpmConverter): offset = 103 def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0), (self.original_tokenizer.eos_token, 0), ] vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.offset def post_processor(self, tokenizer): eos = self.original_tokenizer.eos_token return TemplateProcessing( seq_a=["$0", eos], seq_b=["$1", eos], special_tokens=[(eos, tokenizer.get_vocab()[eos])], ) class T5Converter(SpmConverter): def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])], ) CONVERTERS = { "AlbertTokenizer": AlbertConverter, "CamembertTokenizer": CamembertConverter, "XLMRobertaTokenizer": XLMRobertaConverter, "MBartTokenizer": MBartConverter, "XLNetTokenizer": XLNetConverter, "ReformerTokenizer": ReformerConverter, "PegasusTokenizer": PegasusConverter, "T5Tokenizer": T5Converter, } def check(pretrained, filename): transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained) converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__] tokenizer = converter_class(transformer_tokenizer).converted() now = datetime.datetime.now trans_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(filename, "r") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = transformer_tokenizer.encode(line) trans = now() tok_ids = tokenizer.encode(line).ids tok = now() trans_total_time += trans - start tok_total_time += tok - trans if ids != tok_ids: if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer): continue assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}" tokenizer.save(f"{pretrained.replace('/', '-')}.json") return ("OK", trans_total_time / tok_total_time) def main(): pretraineds = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", "camembert-base", "xlm-roberta-base", "xlm-roberta-large", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", "facebook/mbart-large-en-ro", "facebook/mbart-large-cc25", "xlnet-base-cased", "xlnet-large-cased", "google/reformer-crime-and-punishment", "t5-small", "google/pegasus-large", ] parser = argparse.ArgumentParser() parser.add_argument( "--filename", required=True, type=str, help="The filename that we are going to encode in both versions to check that conversion worked", ) parser.add_argument( "--models", type=lambda s: s.split(","), default=pretraineds, help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})", ) args = parser.parse_args() print(args.filename) model_len = 50 status_len = 6 speedup_len = 8 print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|") print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|") for pretrained in args.models: status, speedup = check(pretrained, args.filename) print( 
f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|" ) if __name__ == "__main__": main()
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/sentencepiece_extractor.py
from argparse import ArgumentParser from json import dump from logging import basicConfig, getLogger from os import linesep, remove from os.path import exists from tempfile import NamedTemporaryFile from typing import Dict, List, Tuple from requests import get from sentencepiece import SentencePieceProcessor from tqdm import trange, tqdm basicConfig() logger = getLogger() class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): # Get SentencePiece self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: sp = self.sp vocab = {sp.id_to_piece(index): index for index in trange(sp.GetPieceSize())} # Merges merges = [] for piece_l in tqdm(vocab.keys(), total=sp.GetPieceSize()): for piece_r in vocab.keys(): merge = f"{piece_l}{piece_r}" piece_id = vocab.get(merge, None) if piece_id: merges += [(piece_l, piece_r, piece_id)] merges = sorted(merges, key=lambda val: val[2]) merges = [(val[0], val[1]) for val in merges] return vocab, merges class YouTokenToMeExtractor: """ Extractor implementation for YouTokenToMe trained models format. Model are as follow: vocab_size nb_merges piece piece_id ...(repeated vocab_size) piece_id_left piece_id_right piece_id ...(repeated nb merges) """ def __init__(self, model: str): self._model = model def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: with open(self._model, "r") as model_f: # Retrieve information nb_pieces, nb_merges = map(int, model_f.readline().split()) vocab, merges = {}, [] # Vocab for _ in trange(nb_pieces): piece, piece_id = map(int, model_f.readline().split()) vocab[piece_id] = chr(piece) # Merges for _ in trange(nb_merges): piece_id_l, piece_id_r, piece = map(int, model_f.readline().split()) piece_l, piece_r = vocab[piece_id_l], vocab[piece_id_r] vocab[piece] = f"{piece_l}{piece_r}" merges += [(piece_l, piece_r)] # Special tokens unk, pad, bos, eos = map(int, model_f.readline().split()) vocab[unk] = "<unk>" vocab[pad] = "<pad>" vocab[bos] = "<bos>" vocab[eos] = "<eos>" # Invert key and value for vocab vocab = dict(zip(vocab.values(), vocab.keys())) return vocab, merges if __name__ == "__main__": parser = ArgumentParser("SentencePiece vocab extractor") parser.add_argument( "--provider", type=str, required=True, choices=["sentencepiece", "youtokentome"], help="Indicate the format of the file.", ) parser.add_argument( "--model", type=str, required=True, help="SentencePiece model to extract vocab from." 
) parser.add_argument( "--vocab-output-path", type=str, required=True, help="Path where the vocab.json file will be extracted", ) parser.add_argument( "--merges-output-path", type=str, required=True, help="Path where the merges file will be extracted", ) # Parse cli arguments args = parser.parse_args() try: if args.model.startswith("http"): # Saving model with NamedTemporaryFile("wb", delete=False) as f: logger.info("Writing content from {} to {}".format(args.model, f.name)) response = get(args.model, allow_redirects=True) f.write(response.content) args.remote_model = args.model args.model = f.name # Allocate extractor extractor = ( SentencePieceExtractor if args.provider == "sentencepiece" else YouTokenToMeExtractor ) extractor = extractor(args.model) logger.info(f"Using {type(extractor).__name__}") # Open output files and let's extract model information with open(args.vocab_output_path, "w") as vocab_f: with open(args.merges_output_path, "w") as merges_f: # Do the extraction vocab, merges = extractor.extract() # Save content dump(vocab, vocab_f) merges_f.writelines(map(lambda x: f"{x[0]} {x[1]}{linesep}", merges)) finally: # If model was downloaded from internet we need to cleanup the tmp folder. if hasattr(args, "remote_model") and exists(args.model): remove(args.model)
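A small usage sketch of the extractor above, bypassing the CLI; it assumes a local SentencePiece model file named "spm.model" and writes the same vocab.json/merges outputs the script produces:

from json import dump

extractor = SentencePieceExtractor("spm.model")
vocab, merges = extractor.extract()

with open("vocab.json", "w") as vocab_f:
    dump(vocab, vocab_f)
with open("merges.txt", "w") as merges_f:
    # one "left right" pair per line, in merge-rank order
    merges_f.writelines(f"{left} {right}\n" for left, right in merges)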
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/decoders.rs
use std::sync::{Arc, RwLock}; use crate::utils::PyChar; use crate::utils::PyPattern; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::decoders::bpe::BPEDecoder; use tk::decoders::byte_fallback::ByteFallback; use tk::decoders::byte_level::ByteLevel; use tk::decoders::ctc::CTC; use tk::decoders::fuse::Fuse; use tk::decoders::metaspace::Metaspace; use tk::decoders::sequence::Sequence; use tk::decoders::strip::Strip; use tk::decoders::wordpiece::WordPiece; use tk::decoders::DecoderWrapper; use tk::normalizers::replace::Replace; use tk::Decoder; use tokenizers as tk; use super::error::ToPyResult; /// Base class for all decoders /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a Decoder will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.decoders", name = "Decoder", subclass)] #[derive(Clone, Deserialize, Serialize)] pub struct PyDecoder { #[serde(flatten)] pub(crate) decoder: PyDecoderWrapper, } impl PyDecoder { pub(crate) fn new(decoder: PyDecoderWrapper) -> Self { PyDecoder { decoder } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.decoder { PyDecoderWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyDecoderWrapper::Wrapped(inner) => match &*inner.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => Py::new(py, (PyMetaspaceDec {}, base))?.into_py(py), DecoderWrapper::WordPiece(_) => Py::new(py, (PyWordPieceDec {}, base))?.into_py(py), DecoderWrapper::ByteFallback(_) => { Py::new(py, (PyByteFallbackDec {}, base))?.into_py(py) } DecoderWrapper::Strip(_) => Py::new(py, (PyStrip {}, base))?.into_py(py), DecoderWrapper::Fuse(_) => Py::new(py, (PyFuseDec {}, base))?.into_py(py), DecoderWrapper::ByteLevel(_) => Py::new(py, (PyByteLevelDec {}, base))?.into_py(py), DecoderWrapper::Replace(_) => Py::new(py, (PyReplaceDec {}, base))?.into_py(py), DecoderWrapper::BPE(_) => Py::new(py, (PyBPEDecoder {}, base))?.into_py(py), DecoderWrapper::CTC(_) => Py::new(py, (PyCTCDecoder {}, base))?.into_py(py), DecoderWrapper::Sequence(_) => { Py::new(py, (PySequenceDecoder {}, base))?.into_py(py) } }, }) } } impl Decoder for PyDecoder { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { self.decoder.decode_chain(tokens) } } #[pymethods] impl PyDecoder { #[staticmethod] fn custom(decoder: PyObject) -> Self { let decoder = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(decoder)))); PyDecoder::new(decoder) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.decoder).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Decoder: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.decoder = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Decoder: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Decode the given list of tokens to a final string /// /// Args: /// tokens (:obj:`List[str]`): /// The list of tokens to decode /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(text_signature = "(self, tokens)")] fn decode(&self, tokens: Vec<String>) -> PyResult<String> { 
ToPyResult(self.decoder.decode(tokens)).into() } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref dec) = *wrap.read().unwrap() { dec.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name($value); } } }}; } /// ByteLevel Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteLevel")] #[pyo3(text_signature = "(self)")] pub struct PyByteLevelDec {} #[pymethods] impl PyByteLevelDec { #[new] #[pyo3(signature = (**_kwargs))] fn new(_kwargs: Option<&PyDict>) -> (Self, PyDecoder) { (PyByteLevelDec {}, ByteLevel::default().into()) } } /// Replace Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Replace")] #[pyo3(text_signature = "(self, pattern, content)")] pub struct PyReplaceDec {} #[pymethods] impl PyReplaceDec { #[new] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyDecoder)> { Ok(( PyReplaceDec {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } /// WordPiece Decoder /// /// Args: /// prefix (:obj:`str`, `optional`, defaults to :obj:`##`): /// The prefix to use for subwords that are not a beginning-of-word /// /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, /// and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "WordPiece")] #[pyo3(text_signature = "(self, prefix=\"##\", cleanup=True)")] pub struct PyWordPieceDec {} #[pymethods] impl PyWordPieceDec { #[getter] fn get_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, prefix.clone()) } #[setter] fn set_prefix(self_: PyRef<Self>, prefix: String) { setter!(self_, WordPiece, prefix, prefix); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, WordPiece, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, WordPiece, cleanup, cleanup); } #[new] #[pyo3(signature = (prefix = String::from("##"), cleanup = true))] fn new(prefix: String, cleanup: bool) -> (Self, PyDecoder) { (PyWordPieceDec {}, WordPiece::new(prefix, cleanup).into()) } } /// ByteFallback Decoder /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. 
If the tokens /// cannot be decoded you will get � instead for each inconvertable byte token /// #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteFallback")] #[pyo3(text_signature = "(self)")] pub struct PyByteFallbackDec {} #[pymethods] impl PyByteFallbackDec { #[new] #[pyo3(signature = ())] fn new() -> (Self, PyDecoder) { (PyByteFallbackDec {}, ByteFallback::new().into()) } } /// Fuse Decoder /// Fuse simply fuses every token into a single string. /// This is the last step of decoding, this decoder exists only if /// there is need to add other decoders *after* the fusion #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Fuse")] #[pyo3(text_signature = "(self)")] pub struct PyFuseDec {} #[pymethods] impl PyFuseDec { #[new] #[pyo3(signature = ())] fn new() -> (Self, PyDecoder) { (PyFuseDec {}, Fuse::new().into()) } } /// Strip normalizer /// Strips n left characters of each token, or n right characters of each token #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Strip")] #[pyo3(text_signature = "(self, content, left=0, right=0)")] pub struct PyStrip {} #[pymethods] impl PyStrip { #[getter] fn get_start(self_: PyRef<Self>) -> usize { getter!(self_, Strip, start) } #[setter] fn set_start(self_: PyRef<Self>, start: usize) { setter!(self_, Strip, start, start) } #[getter] fn get_stop(self_: PyRef<Self>) -> usize { getter!(self_, Strip, stop) } #[setter] fn set_stop(self_: PyRef<Self>, stop: usize) { setter!(self_, Strip, stop, stop) } #[getter] fn get_content(self_: PyRef<Self>) -> char { getter!(self_, Strip, content) } #[setter] fn set_content(self_: PyRef<Self>, content: char) { setter!(self_, Strip, content, content) } #[new] #[pyo3(signature = (content=' ', left=0, right=0))] fn new(content: char, left: usize, right: usize) -> (Self, PyDecoder) { (PyStrip {}, Strip::new(content, left, right).into()) } } /// Metaspace Decoder /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Metaspace")] #[pyo3(text_signature = "(self, replacement = \"▁\", add_prefix_space = True)")] pub struct PyMetaspaceDec {} #[pymethods] impl PyMetaspaceDec { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: PyChar) { setter!(self_, Metaspace, @set_replacement, replacement.0); } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Metaspace, add_prefix_space, add_prefix_space); } #[new] #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true))] fn new(replacement: PyChar, add_prefix_space: bool) -> (Self, PyDecoder) { ( PyMetaspaceDec {}, Metaspace::new(replacement.0, add_prefix_space).into(), ) } } /// BPEDecoder Decoder /// /// Args: /// suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`): /// The suffix that was used to caracterize an end-of-word. 
This suffix will /// be replaced by whitespaces during the decoding #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "BPEDecoder")] #[pyo3(text_signature = "(self, suffix=\"</w>\")")] pub struct PyBPEDecoder {} #[pymethods] impl PyBPEDecoder { #[getter] fn get_suffix(self_: PyRef<Self>) -> String { getter!(self_, BPE, suffix.clone()) } #[setter] fn set_suffix(self_: PyRef<Self>, suffix: String) { setter!(self_, BPE, suffix, suffix); } #[new] #[pyo3(signature = (suffix = String::from("</w>")))] fn new(suffix: String) -> (Self, PyDecoder) { (PyBPEDecoder {}, BPEDecoder::new(suffix).into()) } } /// CTC Decoder /// /// Args: /// pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`): /// The pad token used by CTC to delimit a new token. /// word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): /// The word delimiter token. It will be replaced by a <space> /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. /// Mainly spaces before punctuation, and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "CTC")] #[pyo3(text_signature = "(self, pad_token=\"<pad>\", word_delimiter_token=\"|\", cleanup=True)")] pub struct PyCTCDecoder {} #[pymethods] impl PyCTCDecoder { #[getter] fn get_pad_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, pad_token.clone()) } #[setter] fn set_pad_token(self_: PyRef<Self>, pad_token: String) { setter!(self_, CTC, pad_token, pad_token); } #[getter] fn get_word_delimiter_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, word_delimiter_token.clone()) } #[setter] fn set_word_delimiter_token(self_: PyRef<Self>, word_delimiter_token: String) { setter!(self_, CTC, word_delimiter_token, word_delimiter_token); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, CTC, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, CTC, cleanup, cleanup); } #[new] #[pyo3(signature = ( pad_token = String::from("<pad>"), word_delimiter_token = String::from("|"), cleanup = true ))] fn new(pad_token: String, word_delimiter_token: String, cleanup: bool) -> (Self, PyDecoder) { ( PyCTCDecoder {}, CTC::new(pad_token, word_delimiter_token, cleanup).into(), ) } } /// Sequence Decoder /// /// Args: /// decoders (:obj:`List[Decoder]`) /// The decoders that need to be chained #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name="Sequence")] #[pyo3(text_signature = "(self, decoders)")] pub struct PySequenceDecoder {} #[pymethods] impl PySequenceDecoder { #[new] #[pyo3(signature = (decoders_py))] fn new(decoders_py: &PyList) -> PyResult<(Self, PyDecoder)> { let mut decoders: Vec<DecoderWrapper> = Vec::with_capacity(decoders_py.len()); for decoder_py in decoders_py.iter() { let decoder: PyRef<PyDecoder> = decoder_py.extract()?; let decoder = match &decoder.decoder { PyDecoderWrapper::Wrapped(inner) => inner, PyDecoderWrapper::Custom(_) => unimplemented!(), }; decoders.push(decoder.read().unwrap().clone()); } Ok((PySequenceDecoder {}, Sequence::new(decoders).into())) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } #[derive(Clone)] pub(crate) struct CustomDecoder { inner: PyObject, } impl CustomDecoder { pub(crate) fn new(inner: PyObject) -> Self { CustomDecoder { inner } } } impl Decoder for CustomDecoder { fn decode(&self, tokens: Vec<String>) -> tk::Result<String> { Python::with_gil(|py| { let decoded = self .inner 
.call_method(py, "decode", (tokens,), None)? .extract(py)?; Ok(decoded) }) } fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode_chain", (tokens,), None)? .extract(py)?; Ok(decoded) }) } } impl Serialize for CustomDecoder { fn serialize<S>(&self, _serializer: S) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PyDecoder cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomDecoder { fn deserialize<D>(_deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { Err(D::Error::custom("PyDecoder cannot be deserialized")) } } #[derive(Clone, Deserialize, Serialize)] #[serde(untagged)] pub(crate) enum PyDecoderWrapper { Custom(Arc<RwLock<CustomDecoder>>), Wrapped(Arc<RwLock<DecoderWrapper>>), } impl<I> From<I> for PyDecoderWrapper where I: Into<DecoderWrapper>, { fn from(norm: I) -> Self { PyDecoderWrapper::Wrapped(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyDecoder where I: Into<DecoderWrapper>, { fn from(dec: I) -> Self { PyDecoder { decoder: dec.into().into(), } } } impl Decoder for PyDecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { match self { PyDecoderWrapper::Wrapped(inner) => inner.read().unwrap().decode_chain(tokens), PyDecoderWrapper::Custom(inner) => inner.read().unwrap().decode_chain(tokens), } } } /// Decoders Module #[pymodule] pub fn decoders(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyDecoder>()?; m.add_class::<PyByteLevelDec>()?; m.add_class::<PyReplaceDec>()?; m.add_class::<PyWordPieceDec>()?; m.add_class::<PyByteFallbackDec>()?; m.add_class::<PyFuseDec>()?; m.add_class::<PyStrip>()?; m.add_class::<PyMetaspaceDec>()?; m.add_class::<PyBPEDecoder>()?; m.add_class::<PyCTCDecoder>()?; m.add_class::<PySequenceDecoder>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::{Arc, RwLock}; use pyo3::prelude::*; use tk::decoders::metaspace::Metaspace; use tk::decoders::DecoderWrapper; use crate::decoders::{CustomDecoder, PyDecoder, PyDecoderWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_dec = PyDecoder::new(Metaspace::default().into()); let py_meta = py_dec.get_as_subtype(py).unwrap(); assert_eq!("Metaspace", py_meta.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyDecoderWrapper = Metaspace::default().into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = DecoderWrapper::Metaspace(Metaspace::default()); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_dec: PyDecoder = serde_json::from_str(&rs_ser).unwrap(); match py_dec.decoder { PyDecoderWrapper::Wrapped(msp) => match *msp.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => {} _ => panic!("Expected Metaspace"), }, _ => panic!("Expected wrapped, not custom."), } let obj = Python::with_gil(|py| { let py_msp = PyDecoder::new(Metaspace::default().into()); let obj: PyObject = Py::new(py, py_msp).unwrap().into_py(py); obj }); let py_seq = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(obj)))); assert!(serde_json::to_string(&py_seq).is_err()); } }
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/token.rs
use pyo3::prelude::*; use tk::Token; #[pyclass(module = "tokenizers", name = "Token")] #[derive(Clone)] pub struct PyToken { token: Token, } impl From<Token> for PyToken { fn from(token: Token) -> Self { Self { token } } } impl From<PyToken> for Token { fn from(token: PyToken) -> Self { token.token } } #[pymethods] impl PyToken { #[new] fn new(id: u32, value: String, offsets: (usize, usize)) -> PyToken { Token::new(id, value, offsets).into() } #[getter] fn get_id(&self) -> u32 { self.token.id } #[getter] fn get_value(&self) -> &str { &self.token.value } #[getter] fn get_offsets(&self) -> (usize, usize) { self.token.offsets } fn as_tuple(&self) -> (u32, &str, (usize, usize)) { (self.token.id, &self.token.value, self.token.offsets) } }
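PyToken is a plain data holder around tk::Token. Assuming the class is exposed to Python as tokenizers.Token (the import path here is an assumption, not confirmed by this file), it behaves roughly as:

from tokenizers import Token  # assumed import path

tok = Token(0, "hello", (0, 5))
print(tok.id, tok.value, tok.offsets)  # 0 hello (0, 5)
print(tok.as_tuple())                  # (0, 'hello', (0, 5))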
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/processors.rs
use std::convert::TryInto; use std::sync::Arc; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use serde::{Deserialize, Serialize}; use tk::processors::bert::BertProcessing; use tk::processors::byte_level::ByteLevel; use tk::processors::roberta::RobertaProcessing; use tk::processors::sequence::Sequence; use tk::processors::template::{SpecialToken, Template}; use tk::processors::PostProcessorWrapper; use tk::{Encoding, PostProcessor}; use tokenizers as tk; /// Base class for all post-processors /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a PostProcessor will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.processors", name = "PostProcessor", subclass )] #[derive(Clone, Deserialize, Serialize)] pub struct PyPostProcessor { #[serde(flatten)] pub processor: Arc<PostProcessorWrapper>, } impl PyPostProcessor { pub fn new(processor: Arc<PostProcessorWrapper>) -> Self { PyPostProcessor { processor } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.processor.as_ref() { PostProcessorWrapper::ByteLevel(_) => Py::new(py, (PyByteLevel {}, base))?.into_py(py), PostProcessorWrapper::Bert(_) => Py::new(py, (PyBertProcessing {}, base))?.into_py(py), PostProcessorWrapper::Roberta(_) => { Py::new(py, (PyRobertaProcessing {}, base))?.into_py(py) } PostProcessorWrapper::Template(_) => { Py::new(py, (PyTemplateProcessing {}, base))?.into_py(py) } PostProcessorWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py), }) } } impl PostProcessor for PyPostProcessor { fn added_tokens(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> tk::Result<Vec<Encoding>> { self.processor .process_encodings(encodings, add_special_tokens) } } #[pymethods] impl PyPostProcessor { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(self.processor.as_ref()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PostProcessor: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.processor = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PostProcessor: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Return the number of special tokens that would be added for single/pair sentences. 
/// /// Args: /// is_pair (:obj:`bool`): /// Whether the input would be a pair of sequences /// /// Returns: /// :obj:`int`: The number of tokens to add #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } /// Post-process the given encodings, generating the final one /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The encoding for the first sequence /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// The encoding for the pair sequence /// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Return: /// :class:`~tokenizers.Encoding`: The final encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let final_encoding = ToPyResult(self.processor.process( encoding.encoding.clone(), pair.map(|e| e.encoding.clone()), add_special_tokens, )) .into_py()?; Ok(final_encoding.into()) } } /// This post-processor takes care of adding the special tokens needed by /// a Bert model: /// /// - a SEP token /// - a CLS token /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "BertProcessing")] #[pyo3(text_signature = "(self, sep, cls)")] pub struct PyBertProcessing {} #[pymethods] impl PyBertProcessing { #[new] fn new(sep: (String, u32), cls: (String, u32)) -> (Self, PyPostProcessor) { ( PyBertProcessing {}, PyPostProcessor::new(Arc::new(BertProcessing::new(sep, cls).into())), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [("", 0), ("", 0)]) } } /// This post-processor takes care of adding the special tokens needed by /// a Roberta model: /// /// - a SEP token /// - a CLS token /// /// It also takes care of trimming the offsets. /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor should be initialized /// with :obj:`trim_offsets=True` /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id /// /// trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to trim the whitespaces from the produced offsets. /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether the add_prefix_space option was enabled during pre-tokenization. This /// is relevant because it defines the way the offsets are trimmed out. 
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "RobertaProcessing")] #[pyo3(text_signature = "(self, sep, cls, trim_offsets=True, add_prefix_space=True)")] pub struct PyRobertaProcessing {} #[pymethods] impl PyRobertaProcessing { #[new] #[pyo3(signature = (sep, cls, trim_offsets = true, add_prefix_space = true))] fn new( sep: (String, u32), cls: (String, u32), trim_offsets: bool, add_prefix_space: bool, ) -> (Self, PyPostProcessor) { let proc = RobertaProcessing::new(sep, cls) .trim_offsets(trim_offsets) .add_prefix_space(add_prefix_space); ( PyRobertaProcessing {}, PyPostProcessor::new(Arc::new(proc.into())), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [("", 0), ("", 0)]) } } /// This post-processor takes care of trimming the offsets. /// /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor must be used. /// /// Args: /// trim_offsets (:obj:`bool`): /// Whether to trim the whitespaces from the produced offsets. #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "ByteLevel")] #[pyo3(text_signature = "(self, trim_offsets=True)")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[new] #[pyo3(signature = (trim_offsets = None, **_kwargs))] fn new(trim_offsets: Option<bool>, _kwargs: Option<&PyDict>) -> (Self, PyPostProcessor) { let mut byte_level = ByteLevel::default(); if let Some(to) = trim_offsets { byte_level = byte_level.trim_offsets(to); } ( PyByteLevel {}, PyPostProcessor::new(Arc::new(byte_level.into())), ) } } #[derive(Clone, Debug)] pub struct PySpecialToken(SpecialToken); impl From<PySpecialToken> for SpecialToken { fn from(v: PySpecialToken) -> Self { v.0 } } impl FromPyObject<'_> for PySpecialToken { fn extract(ob: &PyAny) -> PyResult<Self> { if let Ok(v) = ob.extract::<(String, u32)>() { Ok(Self(v.into())) } else if let Ok(v) = ob.extract::<(u32, String)>() { Ok(Self(v.into())) } else if let Ok(d) = ob.downcast::<PyDict>() { let id = d .get_item("id") .ok_or_else(|| exceptions::PyValueError::new_err("`id` must be specified"))? .extract::<String>()?; let ids = d .get_item("ids") .ok_or_else(|| exceptions::PyValueError::new_err("`ids` must be specified"))? .extract::<Vec<u32>>()?; let tokens = d .get_item("tokens") .ok_or_else(|| exceptions::PyValueError::new_err("`tokens` must be specified"))? .extract::<Vec<String>>()?; Ok(Self( ToPyResult(SpecialToken::new(id, ids, tokens)).into_py()?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[Tuple[str, int], Tuple[int, str], dict]", )) } } } #[derive(Clone, Debug)] pub struct PyTemplate(Template); impl From<PyTemplate> for Template { fn from(v: PyTemplate) -> Self { v.0 } } impl FromPyObject<'_> for PyTemplate { fn extract(ob: &PyAny) -> PyResult<Self> { if let Ok(s) = ob.extract::<&str>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else if let Ok(s) = ob.extract::<Vec<&str>>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[str, List[str]]", )) } } } /// Provides a way to specify templates in order to add the special tokens to each /// input sequence as relevant. /// /// Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to /// delimitate each sequence. 
:obj:`[CLS]` is always used at the beginning of the first /// sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair /// sequences. The final result looks like this: /// /// - Single sequence: :obj:`[CLS] Hello there [SEP]` /// - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` /// /// With the type ids as following:: /// /// [CLS] ... [SEP] ... [SEP] /// 0 0 0 1 1 /// /// You can achieve such behavior using a TemplateProcessing:: /// /// TemplateProcessing( /// single="[CLS] $0 [SEP]", /// pair="[CLS] $A [SEP] $B:1 [SEP]:1", /// special_tokens=[("[CLS]", 1), ("[SEP]", 0)], /// ) /// /// In this example, each input sequence is identified using a ``$`` construct. This identifier /// lets us specify each input sequence, and the type_id to use. When nothing is specified, /// it uses the default values. Here are the different ways to specify it: /// /// - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` /// - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... /// - Specifying both: ``$A:0``, ``$B:1``, ... /// /// The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. /// /// **Warning**: You must ensure that you are giving the correct tokens/ids as these /// will be added to the Encoding without any further check. If the given ids correspond /// to something totally different in a `Tokenizer` using this `PostProcessor`, it /// might lead to unexpected results. /// /// Args: /// single (:obj:`Template`): /// The template used for single sequences /// /// pair (:obj:`Template`): /// The template used when both sequences are specified /// /// special_tokens (:obj:`Tokens`): /// The list of special tokens used in each sequences /// /// Types: /// /// Template (:obj:`str` or :obj:`List`): /// - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens /// - If a :obj:`List[str]` is provided, a list of tokens /// /// Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): /// - A :obj:`Tuple` with both a token and its associated ID, in any order /// - A :obj:`dict` with the following keys: /// - "id": :obj:`str` => The special token id, as specified in the Template /// - "ids": :obj:`List[int]` => The associated IDs /// - "tokens": :obj:`List[str]` => The associated tokens /// /// The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have /// the same length. 
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "TemplateProcessing")] #[pyo3(text_signature = "(self, single, pair, special_tokens)")] pub struct PyTemplateProcessing {} #[pymethods] impl PyTemplateProcessing { #[new] #[pyo3(signature = (single = None, pair = None, special_tokens = None))] fn new( single: Option<PyTemplate>, pair: Option<PyTemplate>, special_tokens: Option<Vec<PySpecialToken>>, ) -> PyResult<(Self, PyPostProcessor)> { let mut builder = tk::processors::template::TemplateProcessing::builder(); if let Some(seq) = single { builder.single(seq.into()); } if let Some(seq) = pair { builder.pair(seq.into()); } if let Some(sp) = special_tokens { builder.special_tokens(sp); } let processor = builder .build() .map_err(|e| exceptions::PyValueError::new_err(e.to_string()))?; Ok(( PyTemplateProcessing {}, PyPostProcessor::new(Arc::new(processor.into())), )) } } /// Sequence Processor /// /// Args: /// processors (:obj:`List[PostProcessor]`) /// The processors that need to be chained #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "Sequence")] #[pyo3(text_signature = "(self, processors)")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(signature = (processors_py))] fn new(processors_py: &PyList) -> (Self, PyPostProcessor) { let mut processors: Vec<PostProcessorWrapper> = Vec::with_capacity(processors_py.len()); for n in processors_py.iter() { let processor: PyRef<PyPostProcessor> = n.extract().unwrap(); let processor = processor.processor.as_ref(); processors.push(processor.clone()); } let sequence_processor = Sequence::new(processors); ( PySequence {}, PyPostProcessor::new(Arc::new(PostProcessorWrapper::Sequence(sequence_processor))), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } /// Processors Module #[pymodule] pub fn processors(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyPostProcessor>()?; m.add_class::<PyBertProcessing>()?; m.add_class::<PyRobertaProcessing>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyTemplateProcessing>()?; m.add_class::<PySequence>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::Arc; use pyo3::prelude::*; use tk::processors::bert::BertProcessing; use tk::processors::PostProcessorWrapper; use crate::processors::PyPostProcessor; #[test] fn get_subtype() { Python::with_gil(|py| { let py_proc = PyPostProcessor::new(Arc::new( BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)).into(), )); let py_bert = py_proc.get_as_subtype(py).unwrap(); assert_eq!( "BertProcessing", py_bert.as_ref(py).get_type().name().unwrap() ); }) } #[test] fn serialize() { let rs_processing = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)); let rs_wrapper: PostProcessorWrapper = rs_processing.clone().into(); let rs_processing_ser = serde_json::to_string(&rs_processing).unwrap(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_processing = PyPostProcessor::new(Arc::new(rs_wrapper)); let py_ser = serde_json::to_string(&py_processing).unwrap(); assert_eq!(py_ser, rs_processing_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_processing: PyPostProcessor = serde_json::from_str(&rs_processing_ser).unwrap(); match py_processing.processor.as_ref() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected Bert postprocessor."), } let py_processing: PyPostProcessor = serde_json::from_str(&rs_wrapper_ser).unwrap(); match py_processing.processor.as_ref() { PostProcessorWrapper::Bert(_) => (), _ => 
panic!("Expected Bert postprocessor."), } } }
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/encoding.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::tokenizer::{Offsets, PaddingDirection}; use tk::utils::truncation::TruncationDirection; use tokenizers as tk; use crate::error::{deprecation_warning, PyError}; /// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. #[pyclass(dict, module = "tokenizers", name = "Encoding")] #[repr(transparent)] pub struct PyEncoding { pub encoding: tk::tokenizer::Encoding, } impl From<tk::tokenizer::Encoding> for PyEncoding { fn from(v: tk::tokenizer::Encoding) -> Self { Self { encoding: v } } } #[pymethods] impl PyEncoding { #[new] fn new() -> Self { Self { encoding: tk::tokenizer::Encoding::default(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.encoding).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Encoding: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.encoding = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Encoding: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __repr__(&self) -> PyResult<String> { Ok(format!( "Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \ attention_mask, special_tokens_mask, overflowing])", self.encoding.get_ids().len() )) } fn __len__(&self) -> PyResult<usize> { Ok(self.encoding.len()) } /// Merge the list of encodings into one final :class:`~tokenizers.Encoding` /// /// Args: /// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): /// The list of encodings that should be merged in one /// /// growing_offsets (:obj:`bool`, defaults to :obj:`True`): /// Whether the offsets should accumulate while merging /// /// Returns: /// :class:`~tokenizers.Encoding`: The resulting Encoding #[staticmethod] #[pyo3(signature = (encodings, growing_offsets = true))] #[pyo3(text_signature = "(encodings, growing_offsets=True)")] fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding { tk::tokenizer::Encoding::merge( encodings.into_iter().map(|e| e.encoding.clone()), growing_offsets, ) .into() } /// The number of sequences represented /// /// Returns: /// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` #[getter] fn get_n_sequences(&self) -> usize { self.encoding.n_sequences() } /// Set the given sequence index /// /// Set the given sequence index for the whole range of tokens contained in this /// :class:`~tokenizers.Encoding`. #[pyo3(text_signature = "(self, sequence_id)")] fn set_sequence_id(&mut self, sequence_id: usize) { self.encoding.set_sequence_id(sequence_id); } /// The generated IDs /// /// The IDs are the main input to a Language Model. They are the token indices, /// the numerical representations that a LM understands. /// /// Returns: /// :obj:`List[int]`: The list of IDs #[getter] fn get_ids(&self) -> Vec<u32> { self.encoding.get_ids().to_vec() } /// The generated tokens /// /// They are the string representation of the IDs. /// /// Returns: /// :obj:`List[str]`: The list of tokens #[getter] fn get_tokens(&self) -> Vec<String> { self.encoding.get_tokens().to_vec() } /// The generated word indices. /// /// .. warning:: /// This is deprecated and will be removed in a future version. /// Please use :obj:`~tokenizers.Encoding.word_ids` instead. 
/// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> { deprecation_warning( py, "0.9.4", "Encoding.words is deprecated, please use Encoding.word_ids instead.", )?; Ok(self.get_word_ids()) } /// The generated word indices. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_word_ids(&self) -> Vec<Option<u32>> { self.encoding.get_word_ids().to_vec() } /// The generated sequence indices. /// /// They represent the index of the input sequence associated to each token. /// The sequence id can be None if the token is not related to any input sequence, /// like for example with special tokens. /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. #[getter] fn get_sequence_ids(&self) -> Vec<Option<usize>> { self.encoding.get_sequence_ids() } /// The generated type IDs /// /// Generally used for tasks like sequence classification or question answering, /// these tokens let the LM know which input sequence corresponds to each tokens. /// /// Returns: /// :obj:`List[int]`: The list of type ids #[getter] fn get_type_ids(&self) -> Vec<u32> { self.encoding.get_type_ids().to_vec() } /// The offsets associated to each token /// /// These offsets let's you slice the input string, and thus retrieve the original /// part that led to producing the corresponding token. /// /// Returns: /// A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets #[getter] fn get_offsets(&self) -> Vec<(usize, usize)> { self.encoding.get_offsets().to_vec() } /// The special token mask /// /// This indicates which tokens are special tokens, and which are not. /// /// Returns: /// :obj:`List[int]`: The special tokens mask #[getter] fn get_special_tokens_mask(&self) -> Vec<u32> { self.encoding.get_special_tokens_mask().to_vec() } /// The attention mask /// /// This indicates to the LM which tokens should be attended to, and which should not. /// This is especially important when batching sequences, where we need to applying /// padding. /// /// Returns: /// :obj:`List[int]`: The attention mask #[getter] fn get_attention_mask(&self) -> Vec<u32> { self.encoding.get_attention_mask().to_vec() } /// A :obj:`List` of overflowing :class:`~tokenizers.Encoding` /// /// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting /// the output into as many pieces as required to match the specified maximum length. /// This field lets you retrieve all the subsequent pieces. 
/// /// When you use pairs of sequences, the overflowing pieces will contain enough /// variations to cover all the possible combinations, while respecting the provided /// maximum length. #[getter] fn get_overflowing(&self) -> Vec<PyEncoding> { self.encoding .get_overflowing() .clone() .into_iter() .map(|e| e.into()) .collect() } /// Get the encoded tokens corresponding to the word at the given index /// in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> { self.encoding.word_to_tokens(word_index, sequence_index) } /// Get the offsets of the word at the given index in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> { self.encoding.word_to_chars(word_index, sequence_index) } /// Get the index of the sequence represented by the given token. /// /// In the general use case, this method returns :obj:`0` for a single sequence or /// the first sequence of a pair, and :obj:`1` for the second sequence of a pair /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The sequence id of the given token #[pyo3(text_signature = "(self, token_index)")] fn token_to_sequence(&self, token_index: usize) -> Option<usize> { self.encoding.token_to_sequence(token_index) } /// Get the offsets of the token at the given index. /// /// The returned offsets are related to the input sequence that contains the /// token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` #[pyo3(text_signature = "(self, token_index)")] fn token_to_chars(&self, token_index: usize) -> Option<Offsets> { let (_, offsets) = self.encoding.token_to_chars(token_index)?; Some(offsets) } /// Get the index of the word that contains the token in one of the input sequences. /// /// The returned word index is related to the input sequence that contains /// the token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The index of the word in the relevant input sequence. 
#[pyo3(text_signature = "(self, token_index)")] fn token_to_word(&self, token_index: usize) -> Option<u32> { let (_, word_idx) = self.encoding.token_to_word(token_index)?; Some(word_idx) } /// Get the token that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the token that contains this char in the encoded sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> { self.encoding.char_to_token(char_pos, sequence_index) } /// Get the word that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the word that contains this char in the input sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> { self.encoding.char_to_word(char_pos, sequence_index) } /// Pad the :class:`~tokenizers.Encoding` at the given length /// /// Args: /// length (:obj:`int`): /// The desired length /// /// direction: (:obj:`str`, defaults to :obj:`right`): /// The expected padding direction. Can be either :obj:`right` or :obj:`left` /// /// pad_id (:obj:`int`, defaults to :obj:`0`): /// The ID corresponding to the padding token /// /// pad_type_id (:obj:`int`, defaults to :obj:`0`): /// The type ID corresponding to the padding token /// /// pad_token (:obj:`str`, defaults to `[PAD]`): /// The pad token to use #[pyo3(signature = (length, **kwargs))] #[pyo3( text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')" )] fn pad(&mut self, length: usize, kwargs: Option<&PyDict>) -> PyResult<()> { let mut pad_id = 0; let mut pad_type_id = 0; let mut pad_token = "[PAD]"; let mut direction = PaddingDirection::Right; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_id" => pad_id = value.extract()?, "pad_type_id" => pad_type_id = value.extract()?, "pad_token" => pad_token = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } self.encoding .pad(length, pad_id, pad_type_id, pad_token, direction); Ok(()) } /// Truncate the :class:`~tokenizers.Encoding` at the given length /// /// If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating /// this information is lost. It will be considered as representing a single sequence. 
/// /// Args: /// max_length (:obj:`int`): /// The desired length /// /// stride (:obj:`int`, defaults to :obj:`0`): /// The length of previous content to be included in each overflowing piece /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, stride = 0, direction = "right"))] #[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")] fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> { let tdir = match direction { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Invalid truncation direction value : {}", direction )) .into_pyerr::<exceptions::PyValueError>()), }?; self.encoding.truncate(max_length, stride, tdir); Ok(()) } }
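A hedged end-to-end sketch exercising the Encoding accessors bound above; the WordLevel vocabulary is made up for illustration:

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[UNK]": 0, "[PAD]": 1, "my": 2, "name": 3, "is": 4, "john": 5}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

encoding = tokenizer.encode("my name is john")
print(encoding.ids)                # token ids
print(encoding.offsets)            # character spans into the input string
print(encoding.word_to_tokens(1))  # token range covering the second word
encoding.pad(8, pad_id=1, pad_token="[PAD]")
print(encoding.attention_mask)     # 1 for real tokens, 0 for padding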
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/pre_tokenizers.rs
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizer::SplitDelimiterBehavior; use tk::pre_tokenizers::bert::BertPreTokenizer; use tk::pre_tokenizers::byte_level::ByteLevel; use tk::pre_tokenizers::delimiter::CharDelimiterSplit; use tk::pre_tokenizers::digits::Digits; use tk::pre_tokenizers::metaspace::Metaspace; use tk::pre_tokenizers::punctuation::Punctuation; use tk::pre_tokenizers::split::Split; use tk::pre_tokenizers::unicode_scripts::UnicodeScripts; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::tokenizer::Offsets; use tk::{PreTokenizedString, PreTokenizer}; use tokenizers as tk; use super::error::ToPyResult; use super::utils::*; /// Base class for all pre-tokenizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// PreTokenizer will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.pre_tokenizers", name = "PreTokenizer", subclass )] #[derive(Clone, Serialize, Deserialize)] pub struct PyPreTokenizer { #[serde(flatten)] pub(crate) pretok: PyPreTokenizerTypeWrapper, } impl PyPreTokenizer { #[allow(dead_code)] pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self { PyPreTokenizer { pretok } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.pretok { PyPreTokenizerTypeWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PyPreTokenizerTypeWrapper::Single(ref inner) => { match &*inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyPreTokenizerWrapper::Wrapped(inner) => match inner { PreTokenizerWrapper::Whitespace(_) => { Py::new(py, (PyWhitespace {}, base))?.into_py(py) } PreTokenizerWrapper::Split(_) => { Py::new(py, (PySplit {}, base))?.into_py(py) } PreTokenizerWrapper::Punctuation(_) => { Py::new(py, (PyPunctuation {}, base))?.into_py(py) } PreTokenizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PreTokenizerWrapper::Metaspace(_) => { Py::new(py, (PyMetaspace {}, base))?.into_py(py) } PreTokenizerWrapper::Delimiter(_) => { Py::new(py, (PyCharDelimiterSplit {}, base))?.into_py(py) } PreTokenizerWrapper::WhitespaceSplit(_) => { Py::new(py, (PyWhitespaceSplit {}, base))?.into_py(py) } PreTokenizerWrapper::ByteLevel(_) => { Py::new(py, (PyByteLevel {}, base))?.into_py(py) } PreTokenizerWrapper::BertPreTokenizer(_) => { Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py) } PreTokenizerWrapper::Digits(_) => { Py::new(py, (PyDigits {}, base))?.into_py(py) } PreTokenizerWrapper::UnicodeScripts(_) => { Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py) } }, } } }) } } impl PreTokenizer for PyPreTokenizer { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> { self.pretok.pre_tokenize(normalized) } } #[pymethods] impl PyPreTokenizer { #[staticmethod] fn custom(pretok: PyObject) -> Self { PyPreTokenizer { pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.pretok).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PreTokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn 
__setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PreTokenizer: {}", e )) })?; self.pretok = unpickled; Ok(()) } Err(e) => Err(e), } } /// Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.PreTokenizedString` to /// keep track of the pre-tokenization, and leverage the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of /// the pre-tokenization of a raw string, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` /// /// Args: /// pretok (:class:`~tokenizers.PreTokenizedString): /// The pre-tokenized string on which to apply this /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` #[pyo3(text_signature = "(self, pretok)")] fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> { ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into() } /// Pre tokenize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the /// alignment, nor does it provide all the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` /// /// Args: /// sequence (:obj:`str`): /// A string to pre-tokeize /// /// Returns: /// :obj:`List[Tuple[str, Offsets]]`: /// A list of tuple with the pre-tokenized parts and their offsets #[pyo3(text_signature = "(self, sequence)")] fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> { let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s); ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?; Ok(pretokenized .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char) .into_iter() .map(|(s, o, _)| (s.to_owned(), o)) .collect()) } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) = *single.read().unwrap() { pretok.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name($value); } } }}; } /// ByteLevel PreTokenizer /// /// This pre-tokenizer takes care of replacing all bytes of the given string /// with a corresponding representation, as well as splitting into words. /// /// Args: /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
/// use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Set this to :obj:`False` to prevent this `pre_tokenizer` from using /// the GPT2 specific regexp for spliting on whitespace. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")] #[pyo3(text_signature = "(self, add_prefix_space=True, use_regex=True)")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, ByteLevel, add_prefix_space, add_prefix_space); } #[getter] fn get_use_regex(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, use_regex) } #[setter] fn set_use_regex(self_: PyRef<Self>, use_regex: bool) { setter!(self_, ByteLevel, use_regex, use_regex); } #[new] #[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs))] fn new( add_prefix_space: bool, use_regex: bool, _kwargs: Option<&PyDict>, ) -> (Self, PyPreTokenizer) { ( PyByteLevel {}, ByteLevel::default() .add_prefix_space(add_prefix_space) .use_regex(use_regex) .into(), ) } /// Returns the alphabet used by this PreTokenizer. /// /// Since the ByteLevel works as its name suggests, at the byte level, it /// encodes each byte value to a unique visible character. This means that there is a /// total of 256 different characters composing this alphabet. /// /// Returns: /// :obj:`List[str]`: A list of characters that compose the alphabet #[staticmethod] #[pyo3(text_signature = "()")] fn alphabet() -> Vec<String> { ByteLevel::alphabet() .into_iter() .map(|c| c.to_string()) .collect() } } /// This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")] #[pyo3(text_signature = "(self)")] pub struct PyWhitespace {} #[pymethods] impl PyWhitespace { #[new] fn new() -> (Self, PyPreTokenizer) { (PyWhitespace {}, Whitespace {}.into()) } } /// This pre-tokenizer simply splits on the whitespace. Works like `.split()` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")] #[pyo3(text_signature = "(self)")] pub struct PyWhitespaceSplit {} #[pymethods] impl PyWhitespaceSplit { #[new] fn new() -> (Self, PyPreTokenizer) { (PyWhitespaceSplit {}, WhitespaceSplit.into()) } } /// Split PreTokenizer /// /// This versatile pre-tokenizer splits using the provided pattern and /// according to the provided behavior. The pattern can be inverted by /// making use of the invert flag. /// /// Args: /// pattern (:obj:`str` or :class:`~tokenizers.Regex`): /// A pattern used to split the string. Usually a string or a a regex built with `tokenizers.Regex` /// /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// invert (:obj:`bool`, `optional`, defaults to :obj:`False`): /// Whether to invert the pattern. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")] #[pyo3(text_signature = "(self, pattern, behavior, invert=False)")] pub struct PySplit {} #[pymethods] impl PySplit { #[new] #[pyo3(signature = (pattern, behavior, invert = false))] fn new( pattern: PyPattern, behavior: PySplitDelimiterBehavior, invert: bool, ) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PySplit {}, ToPyResult(Split::new(pattern, behavior.into(), invert)) .into_py()? .into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [" ", "removed"]) } } /// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` /// /// Args: /// delimiter: str: /// The delimiter char that will be used to split input #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")] pub struct PyCharDelimiterSplit {} #[pymethods] impl PyCharDelimiterSplit { #[getter] fn get_delimiter(self_: PyRef<Self>) -> String { getter!(self_, Delimiter, delimiter.to_string()) } #[setter] fn set_delimiter(self_: PyRef<Self>, delimiter: PyChar) { setter!(self_, Delimiter, delimiter, delimiter.0); } #[new] pub fn new(delimiter: PyChar) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PyCharDelimiterSplit {}, CharDelimiterSplit::new(delimiter.0).into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [" "]) } } /// BertPreTokenizer /// /// This pre-tokenizer splits tokens on spaces, and also on punctuation. /// Each occurence of a punctuation character will be treated separately. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")] #[pyo3(text_signature = "(self)")] pub struct PyBertPreTokenizer {} #[pymethods] impl PyBertPreTokenizer { #[new] fn new() -> (Self, PyPreTokenizer) { (PyBertPreTokenizer {}, BertPreTokenizer.into()) } } /// This pre-tokenizer simply splits on punctuation as individual characters. /// /// Args: /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. 
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", /// "contiguous" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")] #[pyo3(text_signature = "(self, behavior=\"isolated\")")] pub struct PyPunctuation {} #[pymethods] impl PyPunctuation { #[new] #[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)))] fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) { (PyPunctuation {}, Punctuation::new(behavior.into()).into()) } } /// This pre-tokenizer composes other pre_tokenizers and applies them in sequence #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")] #[pyo3(text_signature = "(self, pretokenizers)")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] fn new(pre_tokenizers: &PyList) -> PyResult<(Self, PyPreTokenizer)> { let mut sequence = Vec::with_capacity(pre_tokenizers.len()); for n in pre_tokenizers.iter() { let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?; match &pretokenizer.pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => { sequence.extend(inner.iter().cloned()) } PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } /// Metaspace pre-tokenizer /// /// This pre-tokenizer replaces any whitespace by the provided replacement character. /// It then tries to split on these spaces. /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")] #[pyo3(text_signature = "(self, replacement=\"_\", add_prefix_space=True)")] pub struct PyMetaspace {} #[pymethods] impl PyMetaspace { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: PyChar) { setter!(self_, Metaspace, @set_replacement, replacement.0); } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Metaspace, add_prefix_space, add_prefix_space); } #[new] #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true, **_kwargs))] fn new( replacement: PyChar, add_prefix_space: bool, _kwargs: Option<&PyDict>, ) -> (Self, PyPreTokenizer) { ( PyMetaspace {}, Metaspace::new(replacement.0, add_prefix_space).into(), ) } } /// This pre-tokenizer simply splits using the digits in separate tokens /// /// Args: /// individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): /// If set to True, digits will each be separated as follows:: /// /// "Call 123 please" -> "Call ", "1", "2", "3", " please" /// /// If set to False, digits will grouped as follows:: /// /// "Call 123 please" -> "Call ", "123", " please" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Digits")] #[pyo3(text_signature = "(self, individual_digits=False)")] pub struct PyDigits {} #[pymethods] impl PyDigits { #[getter] fn get_individual_digits(self_: PyRef<Self>) -> bool { getter!(self_, Digits, individual_digits) } #[setter] fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) { setter!(self_, Digits, individual_digits, individual_digits); } #[new] #[pyo3(signature = (individual_digits = false))] fn new(individual_digits: bool) -> (Self, PyPreTokenizer) { (PyDigits {}, Digits::new(individual_digits).into()) } } /// This pre-tokenizer splits on characters that belong to different language family /// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt /// Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. /// This mimicks SentencePiece Unigram implementation. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")] #[pyo3(text_signature = "(self)")] pub struct PyUnicodeScripts {} #[pymethods] impl PyUnicodeScripts { #[new] fn new() -> (Self, PyPreTokenizer) { (PyUnicodeScripts {}, UnicodeScripts::new().into()) } } #[derive(Clone)] pub(crate) struct CustomPreTokenizer { inner: PyObject, } impl CustomPreTokenizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::PreTokenizer for CustomPreTokenizer { fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> { Python::with_gil(|py| { let pretok = PyPreTokenizedStringRefMut::new(sentence); let py_pretok = self.inner.as_ref(py); py_pretok.call_method("pre_tokenize", (pretok.get(),), None)?; Ok(()) }) } } impl Serialize for CustomPreTokenizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PreTokenizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomPreTokenizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom PreTokenizer cannot be deserialized", )) } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerWrapper { Custom(CustomPreTokenizer), Wrapped(PreTokenizerWrapper), } impl Serialize for PyPreTokenizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>), Single(Arc<RwLock<PyPreTokenizerWrapper>>), } impl Serialize for PyPreTokenizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPreTokenizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("pretokenizers", seq)?; ser.end() } PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPreTokenizerWrapper where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerWrapper::Wrapped(pretok.into()) } } impl<I> From<I> for PyPreTokenizerTypeWrapper where I: Into<PyPreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok.into()))) } } impl<I> From<I> for PyPreTokenizer where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizer { pretok: pretok.into().into(), } } } impl PreTokenizer for PyPreTokenizerTypeWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerTypeWrapper::Single(inner) => inner.read().unwrap().pre_tokenize(pretok), PyPreTokenizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().pre_tokenize(pretok)), } } } impl PreTokenizer for PyPreTokenizerWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok), PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok), } } } /// PreTokenizers Module #[pymodule] pub fn pre_tokenizers(_py: Python, m: &PyModule) -> 
PyResult<()> { m.add_class::<PyPreTokenizer>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyWhitespace>()?; m.add_class::<PyWhitespaceSplit>()?; m.add_class::<PySplit>()?; m.add_class::<PyBertPreTokenizer>()?; m.add_class::<PyMetaspace>()?; m.add_class::<PyCharDelimiterSplit>()?; m.add_class::<PyPunctuation>()?; m.add_class::<PySequence>()?; m.add_class::<PyDigits>()?; m.add_class::<PyUnicodeScripts>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::pre_tokenizers::sequence::Sequence; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use crate::pre_tokenizers::{ CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper, }; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyPreTokenizer::new(Whitespace {}.into()); let py_wsp = py_norm.get_as_subtype(py).unwrap(); assert_eq!("Whitespace", py_wsp.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {}); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap(); match py_pretok.pretok { PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {} _ => panic!("Expected Whitespace"), }, _ => panic!("Expected wrapped, not custom."), } let py_seq: PyPreTokenizerWrapper = Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![ Whitespace {}.into(), WhitespaceSplit.into(), ])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyPreTokenizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let obj = Python::with_gil(|py| { let py_wsp = PyPreTokenizer::new(Whitespace {}.into()); let obj: PyObject = Py::new(py, py_wsp).unwrap().into_py(py); obj }); let py_seq: PyPreTokenizerWrapper = PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj)); assert!(serde_json::to_string(&py_seq).is_err()); } }
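// Illustrative sketch, not part of the public bindings API: it exercises the
// `PreTokenizer` trait impl defined above directly from Rust, assuming only the
// crate-internal `PyPreTokenizerTypeWrapper` and the blanket `From` impls
// declared in this module. The input string and expected splits are arbitrary
// example values.
#[cfg(test)]
mod usage_sketch {
    use tk::pre_tokenizers::whitespace::Whitespace;
    use tk::tokenizer::PreTokenizedString;
    use tk::{OffsetReferential, OffsetType, PreTokenizer};

    use crate::pre_tokenizers::PyPreTokenizerTypeWrapper;

    #[test]
    fn whitespace_wrapper_pre_tokenizes() {
        // Lift the Rust Whitespace pre-tokenizer into the Python-facing wrapper type
        // via the blanket `From` impls defined in this module.
        let wrapper: PyPreTokenizerTypeWrapper = Whitespace {}.into();
        let mut pretokenized = PreTokenizedString::from("Hello there friend");
        wrapper.pre_tokenize(&mut pretokenized).unwrap();
        // Keep only the split strings, dropping offsets and tokens.
        let splits: Vec<String> = pretokenized
            .get_splits(OffsetReferential::Original, OffsetType::Char)
            .into_iter()
            .map(|(s, _, _)| s.to_owned())
            .collect();
        assert_eq!(splits, vec!["Hello", "there", "friend"]);
    }
}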
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/trainers.rs
use std::sync::{Arc, RwLock}; use crate::models::PyModel; use crate::tokenizer::PyAddedToken; use crate::utils::PyChar; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::TrainerWrapper; use tk::Trainer; use tokenizers as tk; /// Base class for all trainers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// Trainer will return an instance of this class when instantiated. #[pyclass(module = "tokenizers.trainers", name = "Trainer", subclass)] #[derive(Clone, Deserialize, Serialize)] pub struct PyTrainer { #[serde(flatten)] pub trainer: Arc<RwLock<TrainerWrapper>>, } impl PyTrainer { #[cfg(test)] pub(crate) fn new(trainer: Arc<RwLock<TrainerWrapper>>) -> Self { PyTrainer { trainer } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.trainer.as_ref().read().unwrap() { TrainerWrapper::BpeTrainer(_) => Py::new(py, (PyBpeTrainer {}, base))?.into_py(py), TrainerWrapper::WordPieceTrainer(_) => { Py::new(py, (PyWordPieceTrainer {}, base))?.into_py(py) } TrainerWrapper::WordLevelTrainer(_) => { Py::new(py, (PyWordLevelTrainer {}, base))?.into_py(py) } TrainerWrapper::UnigramTrainer(_) => { Py::new(py, (PyUnigramTrainer {}, base))?.into_py(py) } }) } } #[pymethods] impl PyTrainer { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.trainer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PyTrainer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PyTrainer: {}", e )) })?; self.trainer = unpickled; Ok(()) } Err(e) => Err(e), } } } impl Trainer for PyTrainer { type Model = PyModel; fn should_show_progress(&self) -> bool { self.trainer.read().unwrap().should_show_progress() } fn train(&self, model: &mut PyModel) -> tk::Result<Vec<tk::AddedToken>> { self.trainer .read() .unwrap() .train(&mut model.model.write().unwrap()) } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> tk::Result<Vec<String>> + Sync, { self.trainer.write().unwrap().feed(iterator, process) } } impl<I> From<I> for PyTrainer where I: Into<TrainerWrapper>, { fn from(trainer: I) -> Self { PyTrainer { trainer: Arc::new(RwLock::new(trainer.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref trainer) = *super_.trainer.read().unwrap() { trainer.$($name)+ } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() { trainer.$name = $value; } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() { trainer.$name($value); } }}; } /// Trainer capable of training a BPE model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. 
/// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): /// A list of special tokens the model should know of. /// /// limit_alphabet (:obj:`int`, `optional`): /// The maximum different characters to keep in the alphabet. /// /// initial_alphabet (:obj:`List[str]`, `optional`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// A prefix to be used for every subword that is not a beginning-of-word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// A suffix to be used for every subword that is a end-of-word. /// /// max_token_length (:obj:`int`, `optional`): /// Prevents creating tokens longer than the specified size. /// This can help with reducing polluting your vocabulary with /// highly repetitive tokens like `======` for wikipedia /// #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "BpeTrainer")] pub struct PyBpeTrainer {} #[pymethods] impl PyBpeTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, BpeTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, BpeTrainer, vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, BpeTrainer, min_frequency) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, BpeTrainer, min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, BpeTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, BpeTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, BpeTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, BpeTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? 
); Ok(()) } #[getter] fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> { getter!(self_, BpeTrainer, limit_alphabet) } #[setter] fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, BpeTrainer, limit_alphabet, limit); } #[getter] fn get_max_token_length(self_: PyRef<Self>) -> Option<usize> { getter!(self_, BpeTrainer, max_token_length) } #[setter] fn set_max_token_length(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, BpeTrainer, max_token_length, limit); } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, BpeTrainer, initial_alphabet.iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, BpeTrainer, initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BpeTrainer, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) { setter!(self_, BpeTrainer, continuing_subword_prefix, prefix); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BpeTrainer, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) { setter!(self_, BpeTrainer, end_of_word_suffix, suffix); } #[new] #[pyo3(signature = (**kwargs))] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::bpe::BpeTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder = builder.vocab_size(val.extract()?), "min_frequency" => builder = builder.min_frequency(val.extract()?), "show_progress" => builder = builder.show_progress(val.extract()?), "special_tokens" => { builder = builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?), "max_token_length" => builder = builder.max_token_length(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder = builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?), _ => println!("Ignored unknown kwargs option {}", key), }; } } Ok((PyBpeTrainer {}, builder.build().into())) } } /// Trainer capable of training a WordPiece model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): /// A list of special tokens the model should know of. 
/// /// limit_alphabet (:obj:`int`, `optional`): /// The maximum different characters to keep in the alphabet. /// /// initial_alphabet (:obj:`List[str]`, `optional`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// A prefix to be used for every subword that is not a beginning-of-word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// A suffix to be used for every subword that is a end-of-word. #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordPieceTrainer")] #[pyo3( text_signature = "(self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet= [],continuing_subword_prefix=\"##\", end_of_word_suffix=None)" )] pub struct PyWordPieceTrainer {} #[pymethods] impl PyWordPieceTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, WordPieceTrainer, vocab_size()) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, WordPieceTrainer, @set_vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, WordPieceTrainer, min_frequency()) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, WordPieceTrainer, @set_min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, WordPieceTrainer, show_progress()) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, WordPieceTrainer, @set_show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, WordPieceTrainer, special_tokens() .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, WordPieceTrainer, @set_special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? 
); Ok(()) } #[getter] fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> { getter!(self_, WordPieceTrainer, limit_alphabet()) } #[setter] fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, WordPieceTrainer, @set_limit_alphabet, limit); } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, WordPieceTrainer, initial_alphabet().iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, WordPieceTrainer, @set_initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, WordPieceTrainer, continuing_subword_prefix().clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) { setter!(self_, WordPieceTrainer, @set_continuing_subword_prefix, prefix); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, WordPieceTrainer, end_of_word_suffix().clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) { setter!(self_, WordPieceTrainer, @set_end_of_word_suffix, suffix); } #[new] #[pyo3(signature = (** kwargs))] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::wordpiece::WordPieceTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder = builder.vocab_size(val.extract()?), "min_frequency" => builder = builder.min_frequency(val.extract()?), "show_progress" => builder = builder.show_progress(val.extract()?), "special_tokens" => { builder = builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder = builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?), _ => println!("Ignored unknown kwargs option {}", key), }; } } Ok((PyWordPieceTrainer {}, builder.build().into())) } } /// Trainer capable of training a WorldLevel model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`): /// A list of special tokens the model should know of. 
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordLevelTrainer")] pub struct PyWordLevelTrainer {} #[pymethods] impl PyWordLevelTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, WordLevelTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, WordLevelTrainer, vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, WordLevelTrainer, min_frequency) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, WordLevelTrainer, min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, WordLevelTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, WordLevelTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, WordLevelTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, WordLevelTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? ); Ok(()) } #[new] #[pyo3(signature = (**kwargs))] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::wordlevel::WordLevelTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => { builder.vocab_size(val.extract()?); } "min_frequency" => { builder.min_frequency(val.extract()?); } "show_progress" => { builder.show_progress(val.extract()?); } "special_tokens" => { builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } _ => println!("Ignored unknown kwargs option {}", key), } } } Ok(( PyWordLevelTrainer {}, builder .build() .expect("WordLevelTrainerBuilder cannot fail") .into(), )) } } /// Trainer capable of training a Unigram model /// /// Args: /// vocab_size (:obj:`int`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// show_progress (:obj:`bool`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`): /// A list of special tokens the model should know of. /// /// initial_alphabet (:obj:`List[str]`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// shrinking_factor (:obj:`float`): /// The shrinking factor used at each step of the training to prune the /// vocabulary. /// /// unk_token (:obj:`str`): /// The token used for out-of-vocabulary tokens. 
/// /// max_piece_length (:obj:`int`): /// The maximum length of a given token. /// /// n_sub_iterations (:obj:`int`): /// The number of iterations of the EM algorithm to perform before /// pruning the vocabulary. #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "UnigramTrainer")] #[pyo3( text_signature = "(self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2)" )] pub struct PyUnigramTrainer {} #[pymethods] impl PyUnigramTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> u32 { getter!(self_, UnigramTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: u32) { setter!(self_, UnigramTrainer, vocab_size, vocab_size); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, UnigramTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, UnigramTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, UnigramTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, UnigramTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? ); Ok(()) } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, UnigramTrainer, initial_alphabet.iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, UnigramTrainer, initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[new] #[pyo3(signature = (**kwargs))] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::unigram::UnigramTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder.vocab_size(val.extract()?), "show_progress" => builder.show_progress(val.extract()?), "n_sub_iterations" => builder.n_sub_iterations(val.extract()?), "shrinking_factor" => builder.shrinking_factor(val.extract()?), "unk_token" => builder.unk_token(val.extract()?), "max_piece_length" => builder.max_piece_length(val.extract()?), "seed_size" => builder.seed_size(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ) } "special_tokens" => builder.special_tokens( val.downcast::<PyList>()? 
.into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ), _ => { println!("Ignored unknown kwargs option {}", key); &mut builder } }; } } let trainer: tokenizers::models::unigram::UnigramTrainer = builder.build().map_err(|e| { exceptions::PyException::new_err(format!("Cannot build UnigramTrainer: {}", e)) })?; Ok((PyUnigramTrainer {}, trainer.into())) } } /// Trainers Module #[pymodule] pub fn trainers(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyTrainer>()?; m.add_class::<PyBpeTrainer>()?; m.add_class::<PyWordPieceTrainer>()?; m.add_class::<PyWordLevelTrainer>()?; m.add_class::<PyUnigramTrainer>()?; Ok(()) } #[cfg(test)] mod tests { use super::*; use tk::models::bpe::trainer::BpeTrainer; #[test] fn get_subtype() { Python::with_gil(|py| { let py_trainer = PyTrainer::new(Arc::new(RwLock::new(BpeTrainer::default().into()))); let py_bpe = py_trainer.get_as_subtype(py).unwrap(); assert_eq!("BpeTrainer", py_bpe.as_ref(py).get_type().name().unwrap()); }) } }
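// Illustrative sketch, not part of the public bindings API: it shows how a
// Rust-side `BpeTrainer` is lifted into the Python-facing `PyTrainer` through the
// blanket `From` impl above and then queried through the `Trainer` trait. The
// builder values below are arbitrary example settings, and the assertion assumes
// `should_show_progress` reflects the `show_progress` builder flag.
#[cfg(test)]
mod usage_sketch {
    use tk::models::bpe::BpeTrainer;
    use tk::Trainer;

    use crate::trainers::PyTrainer;

    #[test]
    fn wrap_bpe_trainer() {
        // Build a small BPE trainer on the Rust side.
        let bpe_trainer = BpeTrainer::builder()
            .vocab_size(1_000)
            .min_frequency(2)
            .show_progress(false)
            .build();
        // Lift it into the PyO3 wrapper; `PyTrainer` forwards `Trainer` trait calls
        // to the wrapped trainer behind its `Arc<RwLock<..>>`.
        let py_trainer: PyTrainer = bpe_trainer.into();
        assert!(!py_trainer.should_show_progress());
    }
}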
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/tokenizer.rs
use std::collections::{hash_map::DefaultHasher, HashMap}; use std::hash::{Hash, Hasher}; use numpy::{npyffi, PyArray1}; use pyo3::class::basic::CompareOp; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use pyo3::AsPyPointer; use tk::models::bpe::BPE; use tk::tokenizer::{ Model, PaddingDirection, PaddingParams, PaddingStrategy, PostProcessor, TokenizerImpl, TruncationDirection, TruncationParams, TruncationStrategy, }; use tk::utils::iter::ResultShunt; use tokenizers as tk; use super::decoders::PyDecoder; use super::encoding::PyEncoding; use super::error::{PyError, ToPyResult}; use super::models::PyModel; use super::normalizers::PyNormalizer; use super::pre_tokenizers::PyPreTokenizer; use super::trainers::PyTrainer; use crate::processors::PyPostProcessor; use crate::utils::{MaybeSizedIterator, PyBufferedIterator}; /// Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. /// It can have special options that defines the way it should behave. /// /// Args: /// content (:obj:`str`): The content of the token /// /// single_word (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should only match single words. If :obj:`True`, this /// token will never match inside of a word. For example the token ``ing`` would match /// on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. /// The notion of "`inside of a word`" is defined by the word boundaries pattern in /// regular expressions (ie. the token should start and end with word boundaries). /// /// lstrip (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should strip all potential whitespaces on its left side. /// If :obj:`True`, this token will greedily match any whitespace on its left. For /// example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text /// ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). /// /// rstrip (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should strip all potential whitespaces on its right /// side. If :obj:`True`, this token will greedily match any whitespace on its right. /// It works just like :obj:`lstrip` but on the right. /// /// normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): /// Defines whether this token should match against the normalized version of the input /// text. For example, with the added token ``"yesterday"``, and a normalizer in charge of /// lowercasing the text, the token could be extract from the input ``"I saw a lion /// Yesterday"``. 
/// #[pyclass(dict, module = "tokenizers", name = "AddedToken")] #[pyo3( text_signature = "(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True)" )] pub struct PyAddedToken { pub content: String, pub is_special_token: bool, pub single_word: Option<bool>, pub lstrip: Option<bool>, pub rstrip: Option<bool>, pub normalized: Option<bool>, } impl PyAddedToken { pub fn from<S: Into<String>>(content: S, is_special_token: Option<bool>) -> Self { Self { content: content.into(), is_special_token: is_special_token.unwrap_or(false), single_word: None, lstrip: None, rstrip: None, normalized: None, } } pub fn get_token(&self) -> tk::tokenizer::AddedToken { let mut token = tk::AddedToken::from(&self.content, self.is_special_token); if let Some(sw) = self.single_word { token = token.single_word(sw); } if let Some(ls) = self.lstrip { token = token.lstrip(ls); } if let Some(rs) = self.rstrip { token = token.rstrip(rs); } if let Some(n) = self.normalized { token = token.normalized(n); } token } pub fn as_pydict<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { let dict = PyDict::new(py); let token = self.get_token(); dict.set_item("content", token.content)?; dict.set_item("single_word", token.single_word)?; dict.set_item("lstrip", token.lstrip)?; dict.set_item("rstrip", token.rstrip)?; dict.set_item("normalized", token.normalized)?; Ok(dict) } } impl From<tk::AddedToken> for PyAddedToken { fn from(token: tk::AddedToken) -> Self { Self { content: token.content, single_word: Some(token.single_word), lstrip: Some(token.lstrip), rstrip: Some(token.rstrip), normalized: Some(token.normalized), is_special_token: !token.normalized, } } } #[pymethods] impl PyAddedToken { #[new] #[pyo3(signature = (content=None, **kwargs))] fn __new__(content: Option<&str>, kwargs: Option<&PyDict>) -> PyResult<Self> { let mut token = PyAddedToken::from(content.unwrap_or(""), None); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "single_word" => token.single_word = Some(value.extract()?), "lstrip" => token.lstrip = Some(value.extract()?), "rstrip" => token.rstrip = Some(value.extract()?), "normalized" => token.normalized = Some(value.extract()?), _ => println!("Ignored unknown kwarg option {}", key), } } } Ok(token) } fn __getstate__<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { self.as_pydict(py) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyDict>(py) { Ok(state) => { for (key, value) in state { let key: &str = key.extract()?; match key { "content" => self.content = value.extract()?, "single_word" => self.single_word = Some(value.extract()?), "lstrip" => self.lstrip = Some(value.extract()?), "rstrip" => self.rstrip = Some(value.extract()?), "normalized" => self.normalized = Some(value.extract()?), _ => {} } } Ok(()) } Err(e) => Err(e), } } /// Get the content of this :obj:`AddedToken` #[getter] fn get_content(&self) -> &str { &self.content } /// Get the value of the :obj:`rstrip` option #[getter] fn get_rstrip(&self) -> bool { self.get_token().rstrip } /// Get the value of the :obj:`lstrip` option #[getter] fn get_lstrip(&self) -> bool { self.get_token().lstrip } /// Get the value of the :obj:`single_word` option #[getter] fn get_single_word(&self) -> bool { self.get_token().single_word } /// Get the value of the :obj:`normalized` option #[getter] fn get_normalized(&self) -> bool { self.get_token().normalized } fn __str__(&self) -> PyResult<&str> { Ok(&self.content) } fn 
__repr__(&self) -> PyResult<String> { let bool_to_python = |p| match p { true => "True", false => "False", }; let token = self.get_token(); Ok(format!( "AddedToken(\"{}\", rstrip={}, lstrip={}, single_word={}, normalized={})", self.content, bool_to_python(token.rstrip), bool_to_python(token.lstrip), bool_to_python(token.single_word), bool_to_python(token.normalized) )) } fn __richcmp__(&self, other: Py<PyAddedToken>, op: CompareOp) -> bool { use CompareOp::*; Python::with_gil(|py| match op { Lt | Le | Gt | Ge => false, Eq => self.get_token() == other.borrow(py).get_token(), Ne => self.get_token() != other.borrow(py).get_token(), }) } fn __hash__(&self) -> u64 { let mut hasher = DefaultHasher::new(); self.get_token().hash(&mut hasher); hasher.finish() } } struct TextInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for TextInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { let err = exceptions::PyTypeError::new_err("TextInputSequence must be str"); if let Ok(s) = ob.downcast::<PyString>() { Ok(Self(s.to_string_lossy().into())) } else { Err(err) } } } impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: TextInputSequence<'s>) -> Self { s.0 } } struct PyArrayUnicode(Vec<String>); impl FromPyObject<'_> for PyArrayUnicode { fn extract(ob: &PyAny) -> PyResult<Self> { // SAFETY Making sure the pointer is a valid numpy array requires calling numpy C code if unsafe { npyffi::PyArray_Check(ob.py(), ob.as_ptr()) } == 0 { return Err(exceptions::PyTypeError::new_err("Expected an np.array")); } let arr = ob.as_ptr() as *mut npyffi::PyArrayObject; // SAFETY Getting all the metadata about the numpy array to check its sanity let (type_num, elsize, alignment, data, nd, flags) = unsafe { let desc = (*arr).descr; ( (*desc).type_num, (*desc).elsize as usize, (*desc).alignment as usize, (*arr).data, (*arr).nd, (*arr).flags, ) }; if nd != 1 { return Err(exceptions::PyTypeError::new_err( "Expected a 1 dimensional np.array", )); } if flags & (npyffi::NPY_ARRAY_C_CONTIGUOUS | npyffi::NPY_ARRAY_F_CONTIGUOUS) == 0 { return Err(exceptions::PyTypeError::new_err( "Expected a contiguous np.array", )); } if type_num != npyffi::types::NPY_TYPES::NPY_UNICODE as i32 { return Err(exceptions::PyTypeError::new_err( "Expected a np.array[dtype='U']", )); } // SAFETY Looking at the raw numpy data to create new owned Rust strings via copies (so it's safe afterwards). 
unsafe { let n_elem = *(*arr).dimensions as usize; let all_bytes = std::slice::from_raw_parts(data as *const u8, elsize * n_elem); let seq = (0..n_elem) .map(|i| { let bytes = &all_bytes[i * elsize..(i + 1) * elsize]; let unicode = pyo3::ffi::PyUnicode_FromKindAndData( pyo3::ffi::PyUnicode_4BYTE_KIND as _, bytes.as_ptr() as *const _, elsize as isize / alignment as isize, ); let py = ob.py(); let obj = PyObject::from_owned_ptr(py, unicode); let s = obj.downcast::<PyString>(py)?; Ok(s.to_string_lossy().trim_matches(char::from(0)).to_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } } impl From<PyArrayUnicode> for tk::InputSequence<'_> { fn from(s: PyArrayUnicode) -> Self { s.0.into() } } struct PyArrayStr(Vec<String>); impl FromPyObject<'_> for PyArrayStr { fn extract(ob: &PyAny) -> PyResult<Self> { let array = ob.downcast::<PyArray1<PyObject>>()?; let seq = array .readonly() .as_array() .iter() .map(|obj| { let s = obj.downcast::<PyString>(ob.py())?; Ok(s.to_string_lossy().into_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } impl From<PyArrayStr> for tk::InputSequence<'_> { fn from(s: PyArrayStr) -> Self { s.0.into() } } struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for PreTokenizedInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(seq) = ob.extract::<PyArrayUnicode>() { return Ok(Self(seq.into())); } if let Ok(seq) = ob.extract::<PyArrayStr>() { return Ok(Self(seq.into())); } if let Ok(s) = ob.downcast::<PyList>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } if let Ok(s) = ob.downcast::<PyTuple>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedInputSequence must be Union[List[str], Tuple[str]]", )) } } impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: PreTokenizedInputSequence<'s>) -> Self { s.0 } } struct TextEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for TextEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<TextInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(TextInputSequence, TextInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<TextInputSequence>()?; let second = arr[1].extract::<TextInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]", )) } } impl<'s> From<TextEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: TextEncodeInput<'s>) -> Self { i.0 } } struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for PreTokenizedEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<PreTokenizedInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(PreTokenizedInputSequence, PreTokenizedInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<PreTokenizedInputSequence>()?; let second = arr[1].extract::<PreTokenizedInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedEncodeInput must be Union[PreTokenizedInputSequence, \ Tuple[PreTokenizedInputSequence, 
PreTokenizedInputSequence]]", )) } } impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: PreTokenizedEncodeInput<'s>) -> Self { i.0 } } type Tokenizer = TokenizerImpl<PyModel, PyNormalizer, PyPreTokenizer, PyPostProcessor, PyDecoder>; /// A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input /// and outputs an :class:`~tokenizers.Encoding`. /// /// Args: /// model (:class:`~tokenizers.models.Model`): /// The core algorithm that this :obj:`Tokenizer` should be using. /// #[pyclass(dict, module = "tokenizers", name = "Tokenizer")] #[pyo3(text_signature = "(self, model)")] #[derive(Clone)] pub struct PyTokenizer { tokenizer: Tokenizer, } impl PyTokenizer { fn new(tokenizer: Tokenizer) -> Self { PyTokenizer { tokenizer } } fn from_model(model: PyModel) -> Self { PyTokenizer::new(TokenizerImpl::new(model)) } } #[pymethods] impl PyTokenizer { #[new] fn __new__(model: PyRef<PyModel>) -> Self { PyTokenizer::from_model(model.clone()) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.tokenizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Tokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.tokenizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Tokenizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { let model = PyModel::from(BPE::default()).into_py(py); PyTuple::new(py, vec![model]) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. /// /// Args: /// json (:obj:`str`): /// A valid JSON string representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(json)")] fn from_str(json: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(json.parse()).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a local JSON file representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(path)")] fn from_file(path: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. /// /// Args: /// buffer (:obj:`bytes`): /// A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(buffer)")] fn from_buffer(buffer: &PyBytes) -> PyResult<Self> { let tokenizer = serde_json::from_slice(buffer.as_bytes()).map_err(|e| { exceptions::PyValueError::new_err(format!( "Cannot instantiate Tokenizer from buffer: {}", e )) })?; Ok(Self { tokenizer }) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the /// Hugging Face Hub. 
/// /// Args: /// identifier (:obj:`str`): /// The identifier of a Model on the Hugging Face Hub, that contains /// a tokenizer.json file /// revision (:obj:`str`, defaults to `main`): /// A branch or commit id /// auth_token (:obj:`str`, `optional`, defaults to `None`): /// An optional auth token used to access private repositories on the /// Hugging Face Hub /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(signature = (identifier, revision = String::from("main"), auth_token = None))] #[pyo3(text_signature = "(identifier, revision=\"main\", auth_token=None)")] fn from_pretrained( identifier: &str, revision: String, auth_token: Option<String>, ) -> PyResult<Self> { let params = tk::FromPretrainedParameters { revision, auth_token, user_agent: [("bindings", "Python"), ("version", crate::VERSION)] .iter() .map(|(k, v)| (k.to_string(), v.to_string())) .collect(), }; let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_pretrained(identifier, Some(params))).into(); Ok(Self::new(tokenizer?)) } /// Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. /// /// Args: /// pretty (:obj:`bool`, defaults to :obj:`False`): /// Whether the JSON string should be pretty formatted. /// /// Returns: /// :obj:`str`: A string representing the serialized Tokenizer #[pyo3(signature = (pretty = false))] #[pyo3(text_signature = "(self, pretty=False)")] fn to_str(&self, pretty: bool) -> PyResult<String> { ToPyResult(self.tokenizer.to_string(pretty)).into() } /// Save the :class:`~tokenizers.Tokenizer` to the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a file in which to save the serialized tokenizer. /// /// pretty (:obj:`bool`, defaults to :obj:`True`): /// Whether the JSON file should be pretty formatted. #[pyo3(signature = (path, pretty = true))] #[pyo3(text_signature = "(self, path, pretty=True)")] fn save(&self, path: &str, pretty: bool) -> PyResult<()> { ToPyResult(self.tokenizer.save(path, pretty)).into() } /// Return the number of special tokens that would be added for single/pair sentences. 
/// :param is_pair: Boolean indicating if the input would be a single sentence or a pair /// :return: #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.tokenizer .get_post_processor() .map_or(0, |p| p.added_tokens(is_pair)) } /// Get the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> { self.tokenizer.get_vocab(with_added_tokens) } /// Get the size of the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`int`: The size of the vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab_size(&self, with_added_tokens: bool) -> usize { self.tokenizer.get_vocab_size(with_added_tokens) } /// Enable truncation /// /// Args: /// max_length (:obj:`int`): /// The max length at which to truncate /// /// stride (:obj:`int`, `optional`): /// The length of the previous first sequence to be included in the overflowing /// sequence /// /// strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): /// The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or /// ``only_second``. /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, **kwargs))] #[pyo3( text_signature = "(self, max_length, stride=0, strategy='longest_first', direction='right')" )] fn enable_truncation(&mut self, max_length: usize, kwargs: Option<&PyDict>) -> PyResult<()> { let mut params = TruncationParams { max_length, ..Default::default() }; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "stride" => params.stride = value.extract()?, "strategy" => { let value: &str = value.extract()?; params.strategy = match value { "longest_first" => Ok(TruncationStrategy::LongestFirst), "only_first" => Ok(TruncationStrategy::OnlyFirst), "only_second" => Ok(TruncationStrategy::OnlySecond), _ => Err(PyError(format!( "Unknown `strategy`: `{}`. Use \ one of `longest_first`, `only_first`, or `only_second`", value )) .into_pyerr::<exceptions::PyValueError>()), }? } "direction" => { let value: &str = value.extract()?; params.direction = match value { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`.", value )) .into_pyerr::<exceptions::PyValueError>()), }? 
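// Python-side sketch of the call handled by this kwargs loop
// (the values below are purely illustrative):
//
//     tokenizer.enable_truncation(512, stride=32, strategy="only_first", direction="left")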
} _ => println!("Ignored unknown kwarg option {}", key), } } } self.tokenizer.with_truncation(Some(params)); Ok(()) } /// Disable truncation #[pyo3(text_signature = "(self)")] fn no_truncation(&mut self) { self.tokenizer.with_truncation(None); } /// Get the currently set truncation parameters /// /// `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current truncation parameters if truncation is enabled #[getter] fn get_truncation<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_truncation().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item("max_length", params.max_length)?; dict.set_item("stride", params.stride)?; dict.set_item("strategy", params.strategy.as_ref())?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Enable the padding /// /// Args: /// direction (:obj:`str`, `optional`, defaults to :obj:`right`): /// The direction in which to pad. Can be either ``right`` or ``left`` /// /// pad_to_multiple_of (:obj:`int`, `optional`): /// If specified, the padding length should always snap to the next multiple of the /// given value. For example if we were going to pad witha length of 250 but /// ``pad_to_multiple_of=8`` then we will pad to 256. /// /// pad_id (:obj:`int`, defaults to 0): /// The id to be used when padding /// /// pad_type_id (:obj:`int`, defaults to 0): /// The type id to be used when padding /// /// pad_token (:obj:`str`, defaults to :obj:`[PAD]`): /// The pad token to be used when padding /// /// length (:obj:`int`, `optional`): /// If specified, the length at which to pad. If not specified we pad using the size of /// the longest sequence in a batch. #[pyo3(signature = (**kwargs))] #[pyo3( text_signature = "(self, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]', length=None, pad_to_multiple_of=None)" )] fn enable_padding(&mut self, kwargs: Option<&PyDict>) -> PyResult<()> { let mut params = PaddingParams::default(); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; params.direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_to_multiple_of" => { if let Some(multiple) = value.extract()? { params.pad_to_multiple_of = multiple; } } "pad_id" => params.pad_id = value.extract()?, "pad_type_id" => params.pad_type_id = value.extract()?, "pad_token" => params.pad_token = value.extract()?, "max_length" => { println!( "enable_padding(max_length=X) is deprecated, \ use enable_padding(length=X) instead" ); if let Some(l) = value.extract()? { params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } "length" => { if let Some(l) = value.extract()? 
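// Python-side sketch of the corresponding call (illustrative values; omitting
// `length` falls back to padding to the longest sequence in each batch):
//
//     tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", length=128)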
{ params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } _ => println!("Ignored unknown kwarg option {}", key), } } } self.tokenizer.with_padding(Some(params)); Ok(()) } /// Disable padding #[pyo3(text_signature = "(self)")] fn no_padding(&mut self) { self.tokenizer.with_padding(None); } /// Get the current padding parameters /// /// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current padding parameters if padding is enabled #[getter] fn get_padding<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_padding().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item( "length", match params.strategy { tk::PaddingStrategy::BatchLongest => None, tk::PaddingStrategy::Fixed(size) => Some(size), }, )?; dict.set_item("pad_to_multiple_of", params.pad_to_multiple_of)?; dict.set_item("pad_id", params.pad_id)?; dict.set_item("pad_token", &params.pad_token)?; dict.set_item("pad_type_id", params.pad_type_id)?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Encode the given sequence and pair. This method can process raw text sequences /// as well as already pre-tokenized sequences. /// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode("A single sequence")` /// encode("A sequence", "And its pair")` /// encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` /// encode( /// [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], /// is_pretokenized=True /// ) /// /// Args: /// sequence (:obj:`~tokenizers.InputSequence`): /// The main input sequence we want to encode. This sequence can be either raw /// text or pre-tokenized, according to the ``is_pretokenized`` argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` /// /// pair (:obj:`~tokenizers.InputSequence`, `optional`): /// An optional input sequence. The expected format is the same that for ``sequence``. /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The encoded result /// #[pyo3(signature = (sequence, pair = None, is_pretokenized = false, add_special_tokens = true))] #[pyo3( text_signature = "(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True)" )] fn encode( &self, sequence: &PyAny, pair: Option<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let sequence: tk::InputSequence = if is_pretokenized { sequence.extract::<PreTokenizedInputSequence>()?.into() } else { sequence.extract::<TextInputSequence>()?.into() }; let input = match pair { Some(pair) => { let pair: tk::InputSequence = if is_pretokenized { pair.extract::<PreTokenizedInputSequence>()?.into() } else { pair.extract::<TextInputSequence>()?.into() }; tk::EncodeInput::Dual(sequence, pair) } None => tk::EncodeInput::Single(sequence), }; ToPyResult( self.tokenizer .encode_char_offsets(input, add_special_tokens) .map(|e| e.into()), ) .into() } /// Encode the given batch of inputs. This method accept both raw text sequences /// as well as already pre-tokenized sequences. 
/// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode_batch([ /// "A single sequence", /// ("A tuple with a sequence", "And its pair"), /// [ "A", "pre", "tokenized", "sequence" ], /// ([ "A", "pre", "tokenized", "sequence" ], "And its pair") /// ]) /// /// Args: /// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): /// A list of single sequences or pair sequences to encode. Each sequence /// can be either raw text or pre-tokenized, according to the ``is_pretokenized`` /// argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch /// #[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))] #[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")] fn encode_batch( &self, py: Python<'_>, input: Vec<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<Vec<PyEncoding>> { let input: Vec<tk::EncodeInput> = input .into_iter() .map(|o| { let input: tk::EncodeInput = if is_pretokenized { o.extract::<PreTokenizedEncodeInput>()?.into() } else { o.extract::<TextEncodeInput>()?.into() }; Ok(input) }) .collect::<PyResult<Vec<tk::EncodeInput>>>()?; py.allow_threads(|| { ToPyResult( self.tokenizer .encode_batch_char_offsets(input, add_special_tokens) .map(|encodings| encodings.into_iter().map(|e| e.into()).collect()), ) .into() }) } /// Decode the given list of ids back to a string /// /// This is used to decode anything coming back from a Language Model /// /// Args: /// ids (A :obj:`List/Tuple` of :obj:`int`): /// The list of ids that we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded string /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(signature = (ids, skip_special_tokens = true))] #[pyo3(text_signature = "(self, ids, skip_special_tokens=True)")] fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> PyResult<String> { ToPyResult(self.tokenizer.decode(&ids, skip_special_tokens)).into() } /// Decode a batch of ids back to their corresponding string /// /// Args: /// sequences (:obj:`List` of :obj:`List[int]`): /// The batch of sequences we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded strings /// /// Returns: /// :obj:`List[str]`: A list of decoded strings #[pyo3(signature = (sequences, skip_special_tokens = true))] #[pyo3(text_signature = "(self, sequences, skip_special_tokens=True)")] fn decode_batch( &self, py: Python<'_>, sequences: Vec<Vec<u32>>, skip_special_tokens: bool, ) -> PyResult<Vec<String>> { py.allow_threads(|| { let slices = sequences.iter().map(|v| &v[..]).collect::<Vec<&[u32]>>(); ToPyResult(self.tokenizer.decode_batch(&slices, skip_special_tokens)).into() }) } /// Convert the given token to its corresponding id if it exists /// /// Args: /// token (:obj:`str`): /// The token to convert /// /// Returns: /// :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, token)")] fn 
token_to_id(&self, token: &str) -> Option<u32> { self.tokenizer.token_to_id(token) } /// Convert the given id to its corresponding token if it exists /// /// Args: /// id (:obj:`int`): /// The id to convert /// /// Returns: /// :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.tokenizer.id_to_token(id) } /// Add the given tokens to the vocabulary /// /// The given tokens are added only if they don't already exist in the vocabulary. /// Each token then gets a new attributed id. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of tokens we want to add to the vocabulary. Each token can be either a /// string or an instance of :class:`~tokenizers.AddedToken` for more customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(false)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = false; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_tokens(&tokens)) } /// Add the given special tokens to the Tokenizer. /// /// If these tokens are already part of the vocabulary, it just let the Tokenizer know about /// them. If they don't exist, the Tokenizer creates them, giving them a new id. /// /// These special tokens will never be processed by the model (ie won't be split into /// multiple tokens), and they can be removed from the output when decoding. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of special tokens we want to add to the vocabulary. Each token can either /// be a string or an instance of :class:`~tokenizers.AddedToken` for more /// customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_special_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.is_special_token = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_special_tokens(&tokens)) } /// Train the Tokenizer using the given files. /// /// Reads the files line by line, while keeping all the whitespace, even new lines. 
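///
/// Example (a sketch; the file names and trainer settings are illustrative)::
///
///     from tokenizers.trainers import BpeTrainer
///     trainer = BpeTrainer(vocab_size=30000, special_tokens=["[UNK]"])
///     tokenizer.train(["wiki.train.raw", "wiki.valid.raw"], trainer)
///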
/// If you want to train from data store in-memory, you can check /// :meth:`~tokenizers.Tokenizer.train_from_iterator` /// /// Args: /// files (:obj:`List[str]`): /// A list of path to the files that we should use for training /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model #[pyo3(signature = (files, trainer = None))] #[pyo3(text_signature = "(self, files, trainer = None)")] fn train(&mut self, files: Vec<String>, trainer: Option<&mut PyTrainer>) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); Python::with_gil(|py| { py.allow_threads(|| { ToPyResult( self.tokenizer .train_from_files(&mut trainer, files) .map(|_| {}), ) .into() }) }) } /// Train the Tokenizer using the provided iterator. /// /// You can provide anything that is a Python Iterator /// /// * A list of sequences :obj:`List[str]` /// * A generator that yields :obj:`str` or :obj:`List[str]` /// * A Numpy array of strings /// * ... /// /// Args: /// iterator (:obj:`Iterator`): /// Any iterator over strings or list of strings /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model /// /// length (:obj:`int`, `optional`): /// The total number of sequences in the iterator. This is used to /// provide meaningful progress tracking #[pyo3(signature = (iterator, trainer = None, length = None))] #[pyo3(text_signature = "(self, iterator, trainer=None, length=None)")] fn train_from_iterator( &mut self, py: Python, iterator: &PyAny, trainer: Option<&mut PyTrainer>, length: Option<usize>, ) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); let buffered_iter = PyBufferedIterator::new( iterator, |element| { // Each element of the iterator can either be: // - An iterator, to allow batching // - A string if let Ok(s) = element.downcast::<PyString>() { itertools::Either::Right(std::iter::once(s.to_str().map(|s| s.to_owned()))) } else { match element.iter() { Ok(iter) => itertools::Either::Left( iter.map(|i| i?.extract::<String>()) .collect::<Vec<_>>() .into_iter(), ), Err(e) => itertools::Either::Right(std::iter::once(Err(e))), } } }, 256, )?; py.allow_threads(|| { ResultShunt::process(buffered_iter, |iter| { self.tokenizer .train(&mut trainer, MaybeSizedIterator::new(iter, length)) .map(|_| {}) .map_err(|e| exceptions::PyException::new_err(e.to_string())) })? }) } /// Apply all the post-processing steps to the given encodings. /// /// The various steps are: /// /// 1. Truncate according to the set truncation params (provided with /// :meth:`~tokenizers.Tokenizer.enable_truncation`) /// 2. Apply the :class:`~tokenizers.processors.PostProcessor` /// 3. Pad according to the set padding params (provided with /// :meth:`~tokenizers.Tokenizer.enable_padding`) /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The :class:`~tokenizers.Encoding` corresponding to the main sequence. /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. 
/// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The final post-processed encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn post_process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { ToPyResult( self.tokenizer .post_process( encoding.encoding.clone(), pair.map(|p| p.encoding.clone()), add_special_tokens, ) .map(|e| e.into()), ) .into() } /// The :class:`~tokenizers.models.Model` in use by the Tokenizer #[getter] fn get_model(&self, py: Python<'_>) -> PyResult<PyObject> { self.tokenizer.get_model().get_as_subtype(py) } /// Set the :class:`~tokenizers.models.Model` #[setter] fn set_model(&mut self, model: PyRef<PyModel>) { self.tokenizer.with_model(model.clone()); } /// The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer #[getter] fn get_normalizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_normalizer() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_normalizer(&mut self, normalizer: PyRef<PyNormalizer>) { self.tokenizer.with_normalizer(normalizer.clone()); } /// The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer #[getter] fn get_pre_tokenizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(pt) = self.tokenizer.get_pre_tokenizer() { pt.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_pre_tokenizer(&mut self, pretok: PyRef<PyPreTokenizer>) { self.tokenizer.with_pre_tokenizer(pretok.clone()); } /// The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer #[getter] fn get_post_processor(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_post_processor() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.processors.PostProcessor` #[setter] fn set_post_processor(&mut self, processor: PyRef<PyPostProcessor>) { self.tokenizer.with_post_processor(processor.clone()); } /// The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer #[getter] fn get_decoder(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(dec) = self.tokenizer.get_decoder() { dec.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.decoders.Decoder` #[setter] fn set_decoder(&mut self, decoder: PyRef<PyDecoder>) { self.tokenizer.with_decoder(decoder.clone()); } } #[cfg(test)] mod test { use super::*; use crate::models::PyModel; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper}; use std::sync::{Arc, RwLock}; use tempfile::NamedTempFile; use tk::normalizers::{Lowercase, NFKC}; #[test] fn serialize() { let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default())); tokenizer.with_normalizer(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(vec![ Arc::new(RwLock::new(NFKC.into())), Arc::new(RwLock::new(Lowercase.into())), ]))); let tmp = NamedTempFile::new().unwrap().into_temp_path(); tokenizer.save(&tmp, false).unwrap(); Tokenizer::from_file(&tmp).unwrap(); } }
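// Python-side sketch of swapping pipeline components through the properties
// bound above (the concrete components are illustrative picks from the
// normalizers / pre_tokenizers / decoders modules):
//
//     tokenizer.normalizer = normalizers.Lowercase()
//     tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
//     tokenizer.decoder = decoders.ByteLevel()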
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/lib.rs
#![warn(clippy::all)] #![allow(clippy::upper_case_acronyms)] // Many false positives with pyo3 it seems &str, and &PyAny get flagged #![allow(clippy::borrow_deref_ref)] extern crate tokenizers as tk; mod decoders; mod encoding; mod error; mod models; mod normalizers; mod pre_tokenizers; mod processors; mod token; mod tokenizer; mod trainers; mod utils; use pyo3::prelude::*; use pyo3::wrap_pymodule; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); // For users using multiprocessing in python, it is quite easy to fork the process running // tokenizers, ending up with a deadlock because we internaly make use of multithreading. So // we register a callback to be called in the event of a fork so that we can warn the user. static mut REGISTERED_FORK_CALLBACK: bool = false; extern "C" fn child_after_fork() { use tk::parallelism::*; if has_parallelism_been_used() && !is_parallelism_configured() { eprintln!( "huggingface/tokenizers: The current process just got forked, after parallelism has \ already been used. Disabling parallelism to avoid deadlocks..." ); eprintln!("To disable this warning, you can either:"); eprintln!( "\t- Avoid using `tokenizers` before the fork if possible\n\ \t- Explicitly set the environment variable {}=(true | false)", ENV_VARIABLE ); set_parallelism(false); } } /// Tokenizers Module #[pymodule] pub fn tokenizers(_py: Python, m: &PyModule) -> PyResult<()> { let _ = env_logger::try_init_from_env("TOKENIZERS_LOG"); // Register the fork callback #[cfg(target_family = "unix")] unsafe { if !REGISTERED_FORK_CALLBACK { libc::pthread_atfork(None, None, Some(child_after_fork)); REGISTERED_FORK_CALLBACK = true; } } m.add_class::<tokenizer::PyTokenizer>()?; m.add_class::<tokenizer::PyAddedToken>()?; m.add_class::<token::PyToken>()?; m.add_class::<encoding::PyEncoding>()?; m.add_class::<utils::PyRegex>()?; m.add_class::<utils::PyNormalizedString>()?; m.add_class::<utils::PyPreTokenizedString>()?; m.add_wrapped(wrap_pymodule!(models::models))?; m.add_wrapped(wrap_pymodule!(pre_tokenizers::pre_tokenizers))?; m.add_wrapped(wrap_pymodule!(decoders::decoders))?; m.add_wrapped(wrap_pymodule!(processors::processors))?; m.add_wrapped(wrap_pymodule!(normalizers::normalizers))?; m.add_wrapped(wrap_pymodule!(trainers::trainers))?; Ok(()) }
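// Python-side sketch of silencing the fork warning above, assuming the
// environment variable behind `ENV_VARIABLE` is `TOKENIZERS_PARALLELISM`
// (as defined in `tk::parallelism`); set it before any fork, e.g. before
// DataLoader workers start:
//
//     import os
//     os.environ["TOKENIZERS_PARALLELISM"] = "false"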
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/models.rs
use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::{Arc, RwLock}; use crate::token::PyToken; use crate::trainers::PyTrainer; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::bpe::{BpeBuilder, Merges, Vocab, BPE}; use tk::models::unigram::Unigram; use tk::models::wordlevel::WordLevel; use tk::models::wordpiece::{WordPiece, WordPieceBuilder}; use tk::models::ModelWrapper; use tk::{Model, Token}; use tokenizers as tk; use super::error::{deprecation_warning, ToPyResult}; /// Base class for all models /// /// The model represents the actual tokenization algorithm. This is the part that /// will contain and manage the learned vocabulary. /// /// This class cannot be constructed directly. Please use one of the concrete models. #[pyclass(module = "tokenizers.models", name = "Model", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyModel { #[serde(flatten)] pub model: Arc<RwLock<ModelWrapper>>, } impl PyModel { pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => Py::new(py, (PyBPE {}, base))?.into_py(py), ModelWrapper::WordPiece(_) => Py::new(py, (PyWordPiece {}, base))?.into_py(py), ModelWrapper::WordLevel(_) => Py::new(py, (PyWordLevel {}, base))?.into_py(py), ModelWrapper::Unigram(_) => Py::new(py, (PyUnigram {}, base))?.into_py(py), }) } } impl Model for PyModel { type Trainer = PyTrainer; fn tokenize(&self, tokens: &str) -> tk::Result<Vec<Token>> { self.model.read().unwrap().tokenize(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } fn get_vocab(&self) -> HashMap<String, u32> { self.model.read().unwrap().get_vocab() } fn get_vocab_size(&self) -> usize { self.model.read().unwrap().get_vocab_size() } fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> { self.model.read().unwrap().save(folder, name) } fn get_trainer(&self) -> Self::Trainer { self.model.read().unwrap().get_trainer().into() } } impl<I> From<I> for PyModel where I: Into<ModelWrapper>, { fn from(model: I) -> Self { Self { model: Arc::new(RwLock::new(model.into())), } } } #[pymethods] impl PyModel { #[new] fn __new__() -> Self { // Instantiate a default empty model. This doesn't really make sense, but we need // to be able to instantiate an empty model for pickle capabilities. 
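// Python-side sketch of the pickle round-trip this default enables
// (illustrative; a concrete model such as models.BPE() is expected to survive it):
//
//     import pickle
//     from tokenizers import models
//     assert isinstance(pickle.loads(pickle.dumps(models.BPE())), models.BPE)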
PyModel { model: Arc::new(RwLock::new(BPE::default().into())), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.model).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Model: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.model = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Model: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Tokenize a sequence /// /// Args: /// sequence (:obj:`str`): /// A sequence to tokenize /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens #[pyo3(text_signature = "(self, sequence)")] fn tokenize(&self, sequence: &str) -> PyResult<Vec<PyToken>> { Ok(ToPyResult(self.model.read().unwrap().tokenize(sequence)) .into_py()? .into_iter() .map(|t| t.into()) .collect()) } /// Get the ID associated to a token /// /// Args: /// token (:obj:`str`): /// A token to convert to an ID /// /// Returns: /// :obj:`int`: The ID associated to the token #[pyo3(text_signature = "(self, tokens)")] fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } /// Get the token associated to an ID /// /// Args: /// id (:obj:`int`): /// An ID to convert to a token /// /// Returns: /// :obj:`str`: The token associated to the ID #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } /// Save the current model /// /// Save the current model in the given folder, using the given prefix for the various /// files that will get created. /// Any file with the same name that already exists in this folder will be overwritten. /// /// Args: /// folder (:obj:`str`): /// The path to the target folder in which to save the various files /// /// prefix (:obj:`str`, `optional`): /// An optional prefix, used to prefix each file name /// /// Returns: /// :obj:`List[str]`: The list of saved files #[pyo3(text_signature = "(self, folder, prefix)")] fn save<'a>( &self, py: Python<'_>, folder: &str, mut prefix: Option<&'a str>, name: Option<&'a str>, ) -> PyResult<Vec<String>> { if name.is_some() { deprecation_warning( py, "0.10.0", "Parameter `name` of Model.save has been renamed `prefix`", )?; if prefix.is_none() { prefix = name; } } let saved: PyResult<Vec<_>> = ToPyResult(self.model.read().unwrap().save(Path::new(folder), prefix)).into(); Ok(saved? .into_iter() .map(|path| path.to_string_lossy().into_owned()) .collect()) } /// Get the associated :class:`~tokenizers.trainers.Trainer` /// /// Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this /// :class:`~tokenizers.models.Model`. 
/// /// Returns: /// :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model #[pyo3(text_signature = "(self)")] fn get_trainer(&self, py: Python<'_>) -> PyResult<PyObject> { PyTrainer::from(self.model.read().unwrap().get_trainer()).get_as_subtype(py) } } /// An implementation of the BPE (Byte-Pair Encoding) algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// merges (:obj:`List[Tuple[str, str]]`, `optional`): /// A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` /// /// cache_capacity (:obj:`int`, `optional`): /// The number of words that the BPE cache can contain. The cache allows /// to speed-up the process by keeping the result of the merge operations /// for a number of words. /// /// dropout (:obj:`float`, `optional`): /// A float between 0 and 1 that represents the BPE dropout to use. /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// The prefix to attach to subword units that don't represent a beginning of word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// The suffix to attach to subword units that represent an end of word. /// /// fuse_unk (:obj:`bool`, `optional`): /// Whether to fuse any subsequent unknown tokens into a single one /// /// byte_fallback (:obj:`bool`, `optional`): /// Whether to use spm byte-fallback trick (defaults to False) #[pyclass(extends=PyModel, module = "tokenizers.models", name = "BPE")] #[pyo3( text_signature = "(self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False)" )] pub struct PyBPE {} impl PyBPE { fn with_builder(mut builder: BpeBuilder, kwargs: Option<&PyDict>) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "cache_capacity" => builder = builder.cache_capacity(value.extract()?), "dropout" => { if let Some(dropout) = value.extract()? { builder = builder.dropout(dropout); } } "unk_token" => { if let Some(unk) = value.extract()? { builder = builder.unk_token(unk); } } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(value.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(value.extract()?), "fuse_unk" => builder = builder.fuse_unk(value.extract()?), "byte_fallback" => builder = builder.byte_fallback(value.extract()?), _ => println!("Ignored unknown kwarg option {}", key), }; } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing BPE: {}", e ))), Ok(bpe) => Ok((PyBPE {}, bpe.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); let model = super_.model.read().unwrap(); if let ModelWrapper::$variant(ref mo) = *model { mo.$($name)+ } else { unreachable!() } }}; } macro_rules! 
setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); let mut model = super_.model.write().unwrap(); if let ModelWrapper::$variant(ref mut mo) = *model { mo.$name = $value; } }}; } #[derive(FromPyObject)] enum PyVocab<'a> { Vocab(Vocab), Filename(&'a str), } #[derive(FromPyObject)] enum PyMerges<'a> { Merges(Merges), Filename(&'a str), } #[pymethods] impl PyBPE { #[getter] fn get_dropout(self_: PyRef<Self>) -> Option<f32> { getter!(self_, BPE, dropout) } #[setter] fn set_dropout(self_: PyRef<Self>, dropout: Option<f32>) { setter!(self_, BPE, dropout, dropout); } #[getter] fn get_unk_token(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: Option<String>) { setter!(self_, BPE, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix( self_: PyRef<Self>, continuing_subword_prefix: Option<String>, ) { setter!( self_, BPE, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, end_of_word_suffix: Option<String>) { setter!(self_, BPE, end_of_word_suffix, end_of_word_suffix); } #[getter] fn get_fuse_unk(self_: PyRef<Self>) -> bool { getter!(self_, BPE, fuse_unk) } #[setter] fn set_fuse_unk(self_: PyRef<Self>, fuse_unk: bool) { setter!(self_, BPE, fuse_unk, fuse_unk); } #[getter] fn get_byte_fallback(self_: PyRef<Self>) -> bool { getter!(self_, BPE, byte_fallback) } #[setter] fn set_byte_fallback(self_: PyRef<Self>, byte_fallback: bool) { setter!(self_, BPE, byte_fallback, byte_fallback); } #[new] #[pyo3(signature = (vocab=None, merges=None, **kwargs))] fn new( py: Python<'_>, vocab: Option<PyVocab>, merges: Option<PyMerges>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if (vocab.is_some() && merges.is_none()) || (vocab.is_none() && merges.is_some()) { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both specified", )); } let mut builder = BPE::builder(); if let (Some(vocab), Some(merges)) = (vocab, merges) { match (vocab, merges) { (PyVocab::Vocab(vocab), PyMerges::Merges(merges)) => { builder = builder.vocab_and_merges(vocab, merges); } (PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => { deprecation_warning( py, "0.9.0", "BPE.__init__ will not create from files anymore, try `BPE.from_file` instead", )?; builder = builder.files(vocab_filename.to_string(), merges_filename.to_string()); } _ => { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both be from memory or both filenames", )); } } } PyBPE::with_builder(builder, kwargs) } /// Read a :obj:`vocab.json` and a :obj:`merges.txt` files /// /// This method provides a way to read and parse the content of these files, /// returning the relevant data structures. If you want to instantiate some BPE models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// A :obj:`Tuple` with the vocab and the merges: /// The vocabulary and merges loaded into memory #[staticmethod] #[pyo3(text_signature = "(self, vocab, merges)")] fn read_file(vocab: &str, merges: &str) -> PyResult<(Vocab, Merges)> { BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!( "Error while reading vocab & merges files: {}", e )) }) } /// Instantiate a BPE model from the given files. /// /// This method is roughly equivalent to doing:: /// /// vocab, merges = BPE.read_file(vocab_filename, merges_filename) /// bpe = BPE(vocab, merges) /// /// If you don't need to keep the :obj:`vocab, merges` values lying around, /// this method is more optimized than manually calling /// :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files #[classmethod] #[pyo3(signature = (vocab, merges, **kwargs))] #[pyo3(text_signature = "(cls, vocab, merge, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, merges: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let (vocab, merges) = BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading BPE files: {}", e)) })?; Py::new( py, PyBPE::new( py, Some(PyVocab::Vocab(vocab)), Some(PyMerges::Merges(merges)), kwargs, )?, ) } } /// An implementation of the WordPiece algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// max_input_chars_per_word (:obj:`int`, `optional`): /// The maximum number of characters to authorize in a single word. 
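///
/// Example (a minimal sketch with a toy, in-memory vocabulary)::
///
///     wp = WordPiece(vocab={"[UNK]": 0, "my": 1, "##name": 2}, unk_token="[UNK]")
///     wp.tokenize("myname")   # expected to yield "my", "##name"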
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordPiece")] #[pyo3(text_signature = "(self, vocab, unk_token, max_input_chars_per_word)")] pub struct PyWordPiece {} impl PyWordPiece { fn with_builder( mut builder: WordPieceBuilder, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "unk_token" => { builder = builder.unk_token(val.extract()?); } "max_input_chars_per_word" => { builder = builder.max_input_chars_per_word(val.extract()?); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?); } _ => println!("Ignored unknown kwargs option {}", key), } } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing WordPiece: {}", e ))), Ok(wordpiece) => Ok((PyWordPiece {}, wordpiece.into())), } } } #[pymethods] impl PyWordPiece { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordPiece, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, continuing_subword_prefix: String) { setter!( self_, WordPiece, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_max_input_chars_per_word(self_: PyRef<Self>) -> usize { getter!(self_, WordPiece, max_input_chars_per_word) } #[setter] fn set_max_input_chars_per_word(self_: PyRef<Self>, max: usize) { setter!(self_, WordPiece, max_input_chars_per_word, max); } #[new] #[pyo3(signature = (vocab=None, **kwargs))] fn new( py: Python<'_>, vocab: Option<PyVocab>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordPiece::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordPiece.__init__ will not create from files anymore, try `WordPiece.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } } } PyWordPiece::with_builder(builder, kwargs) } /// Read a :obj:`vocab.txt` file /// /// This method provides a way to read and parse the content of a standard `vocab.txt` /// file as used by the WordPiece Model, returning the relevant data structures. If you /// want to instantiate some WordPiece models from memory, this method gives you the /// expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) }) } /// Instantiate a WordPiece model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordPiece.read_file(vocab_filename) /// wordpiece = WordPiece(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to /// initialize a :class:`~tokenizers.models.WordPiece` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file #[classmethod] #[pyo3(signature = (vocab, **kwargs))] #[pyo3(text_signature = "(vocab, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let vocab = WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) })?; Py::new( py, PyWordPiece::new(py, Some(PyVocab::Vocab(vocab)), kwargs)?, ) } } /// An implementation of the WordLevel algorithm /// /// Most simple tokenizer model based on mapping tokens to their corresponding id. /// /// Args: /// vocab (:obj:`str`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. #[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordLevel")] #[pyo3(text_signature = "(self, vocab, unk_token)")] pub struct PyWordLevel {} #[pymethods] impl PyWordLevel { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordLevel, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordLevel, unk_token, unk_token); } #[new] #[pyo3(signature = (vocab=None, unk_token = None))] fn new( py: Python<'_>, vocab: Option<PyVocab>, unk_token: Option<String>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordLevel::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordLevel.__init__ will not create from files anymore, \ try `WordLevel.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } }; } if let Some(unk_token) = unk_token { builder = builder.unk_token(unk_token); } Ok(( PyWordLevel {}, builder .build() .map_err(|e| exceptions::PyException::new_err(e.to_string()))? .into(), )) } /// Read a :obj:`vocab.json` /// /// This method provides a way to read and parse the content of a vocabulary file, /// returning the relevant data structures. If you want to instantiate some WordLevel models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) }) } /// Instantiate a WordLevel model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordLevel.read_file(vocab_filename) /// wordlevel = WordLevel(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to /// initialize a :class:`~tokenizers.models.WordLevel` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file #[classmethod] #[pyo3(signature = (vocab, unk_token = None))] #[pyo3(text_signature = "(vocab, unk_token)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, unk_token: Option<String>, ) -> PyResult<Py<Self>> { let vocab = WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) })?; Py::new( py, PyWordLevel::new(py, Some(PyVocab::Vocab(vocab)), unk_token)?, ) } } /// An implementation of the Unigram algorithm /// /// Args: /// vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): /// A list of vocabulary items and their relative score [("am", -0.2442),...] #[pyclass(extends=PyModel, module = "tokenizers.models", name = "Unigram")] #[pyo3(text_signature = "(self, vocab, unk_id, byte_fallback)")] pub struct PyUnigram {} #[pymethods] impl PyUnigram { #[new] fn new( vocab: Option<Vec<(String, f64)>>, unk_id: Option<usize>, byte_fallback: Option<bool>, ) -> PyResult<(Self, PyModel)> { match (vocab, unk_id, byte_fallback) { (Some(vocab), unk_id, byte_fallback) => { let model = Unigram::from(vocab, unk_id, byte_fallback.unwrap_or(false)).map_err(|e| { exceptions::PyException::new_err(format!( "Error while loading Unigram: {}", e )) })?; Ok((PyUnigram {}, model.into())) } (None, None, _) => Ok((PyUnigram {}, Unigram::default().into())), _ => Err(exceptions::PyValueError::new_err( "`vocab` and `unk_id` must be both specified", )), } } } /// Models Module #[pymodule] pub fn models(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyModel>()?; m.add_class::<PyBPE>()?; m.add_class::<PyWordPiece>()?; m.add_class::<PyWordLevel>()?; m.add_class::<PyUnigram>()?; Ok(()) } #[cfg(test)] mod test { use crate::models::PyModel; use pyo3::prelude::*; use tk::models::bpe::BPE; use tk::models::ModelWrapper; #[test] fn get_subtype() { Python::with_gil(|py| { let py_model = PyModel::from(BPE::default()); let py_bpe = py_model.get_as_subtype(py).unwrap(); assert_eq!("BPE", py_bpe.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let rs_bpe = BPE::default(); let rs_bpe_ser = serde_json::to_string(&rs_bpe).unwrap(); let rs_wrapper: ModelWrapper = rs_bpe.into(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_model = PyModel::from(rs_wrapper); let py_ser = serde_json::to_string(&py_model).unwrap(); assert_eq!(py_ser, rs_bpe_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_model: PyModel = serde_json::from_str(&rs_bpe_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => 
(), _ => panic!("Expected BPE model."), }; let py_model: PyModel = serde_json::from_str(&rs_wrapper_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => (), _ => panic!("Expected BPE model."), }; } }
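// Python-side sketch of building a model from in-memory data instead of files
// (toy values, for illustration only):
//
//     from tokenizers import Tokenizer, models
//     bpe = models.BPE(vocab={"a": 0, "b": 1, "ab": 2}, merges=[("a", "b")])
//     tokenizer = Tokenizer(bpe)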
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/normalizers.rs
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use crate::error::ToPyResult; use crate::utils::{PyNormalizedString, PyNormalizedStringRefMut, PyPattern}; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizers::{ BertNormalizer, Lowercase, Nmt, NormalizerWrapper, Precompiled, Prepend, Replace, Strip, StripAccents, NFC, NFD, NFKC, NFKD, }; use tk::{NormalizedString, Normalizer}; use tokenizers as tk; /// Represents the different kind of NormalizedString we can receive from Python: /// - Owned: Created in Python and owned by Python /// - RefMut: A mutable reference to a NormalizedString owned by Rust #[derive(FromPyObject)] enum PyNormalizedStringMut<'p> { Owned(PyRefMut<'p, PyNormalizedString>), RefMut(PyNormalizedStringRefMut), } impl PyNormalizedStringMut<'_> { /// Normalized the underlying `NormalizedString` using the provided normalizer pub fn normalize_with<N>(&mut self, normalizer: &N) -> PyResult<()> where N: Normalizer, { match self { PyNormalizedStringMut::Owned(ref mut n) => normalizer.normalize(&mut n.normalized), PyNormalizedStringMut::RefMut(n) => n.map_as_mut(|n| normalizer.normalize(n))?, } .map_err(|e| exceptions::PyException::new_err(format!("{}", e))) } } /// Base class for all normalizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// Normalizer will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.normalizers", name = "Normalizer", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyNormalizer { #[serde(flatten)] pub(crate) normalizer: PyNormalizerTypeWrapper, } impl PyNormalizer { pub(crate) fn new(normalizer: PyNormalizerTypeWrapper) -> Self { PyNormalizer { normalizer } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.normalizer { PyNormalizerTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py), PyNormalizerTypeWrapper::Single(ref inner) => match &*inner.as_ref().read().unwrap() { PyNormalizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyNormalizerWrapper::Wrapped(ref inner) => match inner { NormalizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } NormalizerWrapper::BertNormalizer(_) => { Py::new(py, (PyBertNormalizer {}, base))?.into_py(py) } NormalizerWrapper::StripNormalizer(_) => { Py::new(py, (PyBertNormalizer {}, base))?.into_py(py) } NormalizerWrapper::Prepend(_) => Py::new(py, (PyPrepend {}, base))?.into_py(py), NormalizerWrapper::StripAccents(_) => { Py::new(py, (PyStripAccents {}, base))?.into_py(py) } NormalizerWrapper::NFC(_) => Py::new(py, (PyNFC {}, base))?.into_py(py), NormalizerWrapper::NFD(_) => Py::new(py, (PyNFD {}, base))?.into_py(py), NormalizerWrapper::NFKC(_) => Py::new(py, (PyNFKC {}, base))?.into_py(py), NormalizerWrapper::NFKD(_) => Py::new(py, (PyNFKD {}, base))?.into_py(py), NormalizerWrapper::Lowercase(_) => { Py::new(py, (PyLowercase {}, base))?.into_py(py) } NormalizerWrapper::Precompiled(_) => { Py::new(py, (PyPrecompiled {}, base))?.into_py(py) } NormalizerWrapper::Replace(_) => Py::new(py, (PyReplace {}, base))?.into_py(py), NormalizerWrapper::Nmt(_) => Py::new(py, (PyNmt {}, base))?.into_py(py), }, }, }) } } impl Normalizer for PyNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { self.normalizer.normalize(normalized) } } #[pymethods] impl PyNormalizer { 
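// Python-side sketch of what `custom` below expects: any object exposing a
// `normalize(self, normalized)` method that mutates the received
// `NormalizedString` in place (assuming, as illustration, that it exposes
// `lowercase()`); note that custom normalizers cannot be serialized, as
// enforced further down in this file:
//
//     class Lowercaser:
//         def normalize(self, normalized):
//             normalized.lowercase()
//
//     tokenizer.normalizer = Normalizer.custom(Lowercaser())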
#[staticmethod] fn custom(obj: PyObject) -> Self { Self { normalizer: PyNormalizerWrapper::Custom(CustomNormalizer::new(obj)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.normalizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Normalizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.normalizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Normalizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Normalize a :class:`~tokenizers.NormalizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.NormalizedString` to /// keep track of the alignment information. If you just want to see the result /// of the normalization on a raw string, you can use /// :meth:`~tokenizers.normalizers.Normalizer.normalize_str` /// /// Args: /// normalized (:class:`~tokenizers.NormalizedString`): /// The normalized string on which to apply this /// :class:`~tokenizers.normalizers.Normalizer` #[pyo3(text_signature = "(self, normalized)")] fn normalize(&self, mut normalized: PyNormalizedStringMut) -> PyResult<()> { normalized.normalize_with(&self.normalizer) } /// Normalize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment /// information. If you need to get/convert offsets, you can use /// :meth:`~tokenizers.normalizers.Normalizer.normalize` /// /// Args: /// sequence (:obj:`str`): /// A string to normalize /// /// Returns: /// :obj:`str`: A string after normalization #[pyo3(text_signature = "(self, sequence)")] fn normalize_str(&self, sequence: &str) -> PyResult<String> { let mut normalized = NormalizedString::from(sequence); ToPyResult(self.normalizer.normalize(&mut normalized)).into_py()?; Ok(normalized.get().to_owned()) } } macro_rules! getter { ($self: ident, $variant: ident, $name: ident) => {{ let super_ = $self.as_ref(); if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer { let wrapper = norm.read().unwrap(); if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(o)) = (*wrapper).clone() { o.$name } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer { let mut wrapper = norm.write().unwrap(); if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(ref mut o)) = *wrapper { o.$name = $value; } } }}; } /// BertNormalizer /// /// Takes care of normalizing raw text before giving it to a Bert model. /// This includes cleaning the text, handling accents, chinese chars and lowercasing /// /// Args: /// clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to clean the text, by removing any control characters /// and replacing all whitespaces by the classic one. /// /// handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to handle chinese chars by putting spaces around them. /// /// strip_accents (:obj:`bool`, `optional`): /// Whether to strip all accents. 
If this option is not specified (ie == None), /// then it will be determined by the value for `lowercase` (as in the original Bert). /// /// lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to lowercase. #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "BertNormalizer")] #[pyo3( text_signature = "(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True)" )] pub struct PyBertNormalizer {} #[pymethods] impl PyBertNormalizer { #[getter] fn get_clean_text(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, clean_text) } #[setter] fn set_clean_text(self_: PyRef<Self>, clean_text: bool) { setter!(self_, BertNormalizer, clean_text, clean_text); } #[getter] fn get_handle_chinese_chars(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, handle_chinese_chars) } #[setter] fn set_handle_chinese_chars(self_: PyRef<Self>, handle_chinese_chars: bool) { setter!( self_, BertNormalizer, handle_chinese_chars, handle_chinese_chars ); } #[getter] fn get_strip_accents(self_: PyRef<Self>) -> Option<bool> { getter!(self_, BertNormalizer, strip_accents) } #[setter] fn set_strip_accents(self_: PyRef<Self>, strip_accents: Option<bool>) { setter!(self_, BertNormalizer, strip_accents, strip_accents); } #[getter] fn get_lowercase(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, lowercase) } #[setter] fn set_lowercase(self_: PyRef<Self>, lowercase: bool) { setter!(self_, BertNormalizer, lowercase, lowercase) } #[new] #[pyo3(signature = ( clean_text = true, handle_chinese_chars = true, strip_accents = None, lowercase = true ))] fn new( clean_text: bool, handle_chinese_chars: bool, strip_accents: Option<bool>, lowercase: bool, ) -> (Self, PyNormalizer) { let normalizer = BertNormalizer::new(clean_text, handle_chinese_chars, strip_accents, lowercase); (PyBertNormalizer {}, normalizer.into()) } } /// NFD Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFD")] #[pyo3(text_signature = "(self)")] pub struct PyNFD {} #[pymethods] impl PyNFD { #[new] fn new() -> (Self, PyNormalizer) { (PyNFD {}, PyNormalizer::new(NFD.into())) } } /// NFKD Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKD")] #[pyo3(text_signature = "(self)")] pub struct PyNFKD {} #[pymethods] impl PyNFKD { #[new] fn new() -> (Self, PyNormalizer) { (PyNFKD {}, NFKD.into()) } } /// NFC Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFC")] #[pyo3(text_signature = "(self)")] pub struct PyNFC {} #[pymethods] impl PyNFC { #[new] fn new() -> (Self, PyNormalizer) { (PyNFC {}, NFC.into()) } } /// NFKC Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKC")] #[pyo3(text_signature = "(self)")] pub struct PyNFKC {} #[pymethods] impl PyNFKC { #[new] fn new() -> (Self, PyNormalizer) { (PyNFKC {}, NFKC.into()) } } /// Allows concatenating multiple other Normalizer as a Sequence. 
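///
/// For instance (an illustrative sketch using normalizers from this module)::
///
///     normalizer = Sequence([NFD(), Lowercase(), StripAccents()])
///     normalizer.normalize_str("Héllo")   # -> "hello"
///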
/// All the normalizers run in sequence in the given order /// /// Args: /// normalizers (:obj:`List[Normalizer]`): /// A list of Normalizer to be run as a sequence #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] fn new(normalizers: &PyList) -> PyResult<(Self, PyNormalizer)> { let mut sequence = Vec::with_capacity(normalizers.len()); for n in normalizers.iter() { let normalizer: PyRef<PyNormalizer> = n.extract()?; match &normalizer.normalizer { PyNormalizerTypeWrapper::Sequence(inner) => sequence.extend(inner.iter().cloned()), PyNormalizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } fn __len__(&self) -> usize { 0 } } /// Lowercase Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Lowercase")] #[pyo3(text_signature = "(self)")] pub struct PyLowercase {} #[pymethods] impl PyLowercase { #[new] fn new() -> (Self, PyNormalizer) { (PyLowercase {}, Lowercase.into()) } } /// Strip normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Strip")] #[pyo3(text_signature = "(self, left=True, right=True)")] pub struct PyStrip {} #[pymethods] impl PyStrip { #[getter] fn get_left(self_: PyRef<Self>) -> bool { getter!(self_, StripNormalizer, strip_left) } #[setter] fn set_left(self_: PyRef<Self>, left: bool) { setter!(self_, StripNormalizer, strip_left, left) } #[getter] fn get_right(self_: PyRef<Self>) -> bool { getter!(self_, StripNormalizer, strip_right) } #[setter] fn set_right(self_: PyRef<Self>, right: bool) { setter!(self_, StripNormalizer, strip_right, right) } #[new] #[pyo3(signature = (left = true, right = true))] fn new(left: bool, right: bool) -> (Self, PyNormalizer) { (PyStrip {}, Strip::new(left, right).into()) } } /// Prepend normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Prepend")] #[pyo3(text_signature = "(self, prepend)")] pub struct PyPrepend {} #[pymethods] impl PyPrepend { #[getter] fn get_prepend(self_: PyRef<Self>) -> String { getter!(self_, Prepend, prepend) } #[setter] fn set_prepend(self_: PyRef<Self>, prepend: String) { setter!(self_, Prepend, prepend, prepend) } #[new] #[pyo3(signature = (prepend="▁".to_string()))] fn new(prepend: String) -> (Self, PyNormalizer) { (PyPrepend {}, Prepend::new(prepend).into()) } } /// StripAccents normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "StripAccents")] #[pyo3(text_signature = "(self)")] pub struct PyStripAccents {} #[pymethods] impl PyStripAccents { #[new] fn new() -> (Self, PyNormalizer) { (PyStripAccents {}, StripAccents.into()) } } /// Nmt normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Nmt")] #[pyo3(text_signature = "(self)")] pub struct PyNmt {} #[pymethods] impl PyNmt { #[new] fn new() -> (Self, PyNormalizer) { (PyNmt {}, Nmt.into()) } } /// Precompiled normalizer /// Don't use manually it is used for compatiblity for SentencePiece. 
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Precompiled")] #[pyo3(text_signature = "(self, precompiled_charsmap)")] pub struct PyPrecompiled {} #[pymethods] impl PyPrecompiled { #[new] fn new(py_precompiled_charsmap: &PyBytes) -> PyResult<(Self, PyNormalizer)> { let precompiled_charsmap: &[u8] = FromPyObject::extract(py_precompiled_charsmap)?; Ok(( PyPrecompiled {}, Precompiled::from(precompiled_charsmap) .map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to build Precompiled normalizer: {}", e )) })? .into(), )) } } /// Replace normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Replace")] #[pyo3(text_signature = "(self, pattern, content)")] pub struct PyReplace {} #[pymethods] impl PyReplace { #[new] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyNormalizer)> { Ok(( PyReplace {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } #[derive(Debug, Clone)] pub(crate) struct CustomNormalizer { inner: PyObject, } impl CustomNormalizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::Normalizer for CustomNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { Python::with_gil(|py| { let normalized = PyNormalizedStringRefMut::new(normalized); let py_normalized = self.inner.as_ref(py); py_normalized.call_method("normalize", (normalized.get(),), None)?; Ok(()) }) } } impl Serialize for CustomNormalizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom Normalizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomNormalizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom Normalizer cannot be deserialized", )) } } #[derive(Debug, Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyNormalizerWrapper { Custom(CustomNormalizer), Wrapped(NormalizerWrapper), } impl Serialize for PyNormalizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyNormalizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyNormalizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Debug, Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyNormalizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyNormalizerWrapper>>>), Single(Arc<RwLock<PyNormalizerWrapper>>), } impl Serialize for PyNormalizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyNormalizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("normalizers", seq)?; ser.end() } PyNormalizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyNormalizerWrapper where I: Into<NormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizerWrapper::Wrapped(norm.into()) } } impl<I> From<I> for PyNormalizerTypeWrapper where I: Into<PyNormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizerTypeWrapper::Single(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyNormalizer where I: Into<NormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizer { normalizer: norm.into().into(), } } } impl Normalizer for PyNormalizerTypeWrapper { fn normalize(&self, normalized: &mut 
NormalizedString) -> tk::Result<()> { match self { PyNormalizerTypeWrapper::Single(inner) => inner.read().unwrap().normalize(normalized), PyNormalizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().normalize(normalized)), } } } impl Normalizer for PyNormalizerWrapper { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { match self { PyNormalizerWrapper::Wrapped(inner) => inner.normalize(normalized), PyNormalizerWrapper::Custom(inner) => inner.normalize(normalized), } } } /// Normalizers Module #[pymodule] pub fn normalizers(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyNormalizer>()?; m.add_class::<PyBertNormalizer>()?; m.add_class::<PyNFD>()?; m.add_class::<PyNFKD>()?; m.add_class::<PyNFC>()?; m.add_class::<PyNFKC>()?; m.add_class::<PySequence>()?; m.add_class::<PyLowercase>()?; m.add_class::<PyStrip>()?; m.add_class::<PyStripAccents>()?; m.add_class::<PyPrepend>()?; m.add_class::<PyNmt>()?; m.add_class::<PyPrecompiled>()?; m.add_class::<PyReplace>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::normalizers::unicode::{NFC, NFKC}; use tk::normalizers::utils::Sequence; use tk::normalizers::NormalizerWrapper; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper, PyNormalizerWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyNormalizer::new(NFC.into()); let py_nfc = py_norm.get_as_subtype(py).unwrap(); assert_eq!("NFC", py_nfc.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyNormalizerWrapper = NFKC.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = NormalizerWrapper::NFKC(NFKC); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_norm: PyNormalizer = serde_json::from_str(&rs_ser).unwrap(); match py_norm.normalizer { PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {} _ => panic!("Expected NFKC"), }, _ => panic!("Expected wrapped, not sequence."), } let py_seq: PyNormalizerWrapper = Sequence::new(vec![NFC.into(), NFKC.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = NormalizerWrapper::Sequence(Sequence::new(vec![NFC.into(), NFKC.into()])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyNormalizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let rs_seq = Sequence::new(vec![NFC.into(), NFKC.into()]); let rs_ser = serde_json::to_string(&rs_seq).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); } #[test] fn deserialize_sequence() { let string = r#"{"type": "NFKC"}"#; let normalizer: PyNormalizer = serde_json::from_str(string).unwrap(); match normalizer.normalizer { PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {} _ => panic!("Expected NFKC"), }, _ => panic!("Expected wrapped, not sequence."), } let sequence_string = format!(r#"{{"type": "Sequence", "normalizers": [{}]}}"#, string); let normalizer: PyNormalizer = serde_json::from_str(&sequence_string).unwrap(); match normalizer.normalizer { PyNormalizerTypeWrapper::Single(inner) => match &*inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::Sequence(sequence)) => { let normalizers = sequence.get_normalizers(); 
assert_eq!(normalizers.len(), 1); match normalizers[0] { NormalizerWrapper::NFKC(_) => {} _ => panic!("Expected NFKC"), } } _ => panic!("Expected sequence"), }, _ => panic!("Expected single"), }; } }
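For orientation, the classes registered above surface in Python as the `tokenizers.normalizers` module. Below is a minimal, hedged sketch of how they are typically driven from Python, assuming the package built from these bindings; method and attribute names follow the docstrings above, and the printed result is indicative rather than guaranteed.

```python
from tokenizers import NormalizedString
from tokenizers.normalizers import BertNormalizer, Lowercase, NFD, Sequence, StripAccents

# normalize_str: convenience view of the result, no alignment tracking
norm = Sequence([NFD(), StripAccents(), Lowercase()])
print(norm.normalize_str("Héllò"))  # expected: "hello"

# normalize: works in place on a NormalizedString and keeps offsets
n = NormalizedString("Héllò")
norm.normalize(n)

# BertNormalizer options map to the getters/setters defined above
bert = BertNormalizer(clean_text=True, handle_chinese_chars=True, lowercase=True)
bert.lowercase = False  # attributes are writable, mirroring set_lowercase
```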
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/error.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::type_object::PyTypeInfo; use std::fmt::{Display, Formatter, Result as FmtResult}; use tokenizers::tokenizer::Result; #[derive(Debug)] pub struct PyError(pub String); impl PyError { #[allow(dead_code)] pub fn from(s: &str) -> Self { PyError(String::from(s)) } pub fn into_pyerr<T: PyTypeInfo>(self) -> PyErr { PyErr::new::<T, _>(format!("{}", self)) } } impl Display for PyError { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "{}", self.0) } } impl std::error::Error for PyError {} pub struct ToPyResult<T>(pub Result<T>); impl<T> From<ToPyResult<T>> for PyResult<T> { fn from(v: ToPyResult<T>) -> Self { v.0.map_err(|e| exceptions::PyException::new_err(format!("{}", e))) } } impl<T> ToPyResult<T> { pub fn into_py(self) -> PyResult<T> { self.into() } } pub(crate) fn deprecation_warning(py: Python<'_>, version: &str, message: &str) -> PyResult<()> { let deprecation_warning = py.import("builtins")?.getattr("DeprecationWarning")?; let full_message = format!("Deprecated in {}: {}", version, message); pyo3::PyErr::warn(py, deprecation_warning, &full_message, 0) }
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/mod.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; mod iterators; mod normalization; mod pretokenization; mod regex; pub use iterators::*; pub use normalization::*; pub use pretokenization::*; pub use regex::*; // PyChar // This type is a temporary hack to accept `char` as argument // To be removed once https://github.com/PyO3/pyo3/pull/1282 has been released pub struct PyChar(pub char); impl FromPyObject<'_> for PyChar { fn extract(obj: &PyAny) -> PyResult<Self> { let s = <PyString as PyTryFrom<'_>>::try_from(obj)?.to_str()?; let mut iter = s.chars(); if let (Some(ch), None) = (iter.next(), iter.next()) { Ok(Self(ch)) } else { Err(exceptions::PyValueError::new_err( "expected a string of length 1", )) } } } // RefMut utils pub trait DestroyPtr { fn destroy(&mut self); } pub struct RefMutGuard<'r, T: DestroyPtr + Clone> { content: T, r: PhantomData<&'r mut T>, } impl<T: DestroyPtr + Clone> RefMutGuard<'_, T> { pub fn new(content: T) -> Self { Self { content, r: PhantomData, } } pub fn get(&self) -> T { self.content.clone() } } impl<T: DestroyPtr + Clone> Drop for RefMutGuard<'_, T> { fn drop(&mut self) { self.content.destroy() } } #[derive(Clone)] pub struct RefMutContainer<T> { inner: Arc<Mutex<Option<*mut T>>>, } impl<T> RefMutContainer<T> { pub fn new(content: &mut T) -> Self { Self { inner: Arc::new(Mutex::new(Some(content))), } } pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_ref().unwrap() })) } pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_mut().unwrap() })) } } impl<T> DestroyPtr for RefMutContainer<T> { fn destroy(&mut self) { self.inner.lock().unwrap().take(); } } unsafe impl<T: Send> Send for RefMutContainer<T> {} unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/iterators.rs
use pyo3::prelude::*; use pyo3::AsPyPointer; use std::collections::VecDeque; /// An simple iterator that can be instantiated with a specified length. /// We use this with iterators that don't have a size_hint but we might /// know its size. This is useful with progress bars for example. pub struct MaybeSizedIterator<I> { length: Option<usize>, iter: I, } impl<I> MaybeSizedIterator<I> where I: Iterator, { pub fn new(iter: I, length: Option<usize>) -> Self { Self { length, iter } } } impl<I> Iterator for MaybeSizedIterator<I> where I: Iterator, { type Item = I::Item; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { (self.length.unwrap_or(0), None) } } /// A buffered iterator that takes care of locking the GIL only when needed. /// The `PyIterator` provided by PyO3 keeps a Python GIL token all along /// and thus doesn't allow us to release the GIL to allow having other threads. /// /// This iterator serves two purposes: /// - First, as opposed to the `pyo3::PyIterator`, it is Send and can easily be parallelized /// - Second, this let us release the GIL between two refills of the buffer, allowing other /// Python threads to work pub struct PyBufferedIterator<T, F> { iter: Option<Py<PyAny>>, converter: F, buffer: VecDeque<PyResult<T>>, size: usize, } impl<T, F, I> PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { /// Create a new PyBufferedIterator using the provided Python object. /// This object must implement the Python Iterator Protocol, and an error will /// be return if the contract is not respected. /// /// The `converter` provides a way to convert each item in the iterator into /// something that doesn't embed a 'py token and thus allows the GIL to be released /// /// The `buffer_size` represents the number of items that we buffer before we /// need to acquire the GIL again. pub fn new(iter: &PyAny, converter: F, buffer_size: usize) -> PyResult<Self> { let py = iter.py(); let iter: Py<PyAny> = unsafe { py.from_borrowed_ptr_or_err::<PyAny>(pyo3::ffi::PyObject_GetIter(iter.as_ptr()))? .to_object(py) }; Ok(Self { iter: Some(iter), converter, buffer: VecDeque::with_capacity(buffer_size), size: buffer_size, }) } /// Refill the buffer, and set `self.iter` as `None` if nothing more to get fn refill(&mut self) -> PyResult<()> { if self.iter.is_none() { return Ok(()); } Python::with_gil(|py| loop { if self.buffer.len() >= self.size { return Ok(()); } match unsafe { py.from_owned_ptr_or_opt::<PyAny>(pyo3::ffi::PyIter_Next( self.iter.as_ref().unwrap().as_ref(py).as_ptr(), )) } { Some(obj) => self.buffer.extend((self.converter)(obj)), None => { if PyErr::occurred(py) { return Err(PyErr::fetch(py)); } else { self.iter = None; } } }; if self.iter.is_none() { return Ok(()); } }) } } impl<T, F, I> Iterator for PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { type Item = PyResult<T>; fn next(&mut self) -> Option<Self::Item> { if !self.buffer.is_empty() { self.buffer.pop_front() } else if self.iter.is_some() { if let Err(e) = self.refill() { return Some(Err(e)); } self.next() } else { None } } }
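The buffering above exists so that a plain Python iterator can feed the Rust side without keeping the GIL held for the whole run. As a rough illustration of the consumer side, here is a hedged sketch assuming the public `Tokenizer.train_from_iterator` entry point and its optional `length` hint (neither is defined in this file); `length` is exactly the kind of size information `MaybeSizedIterator` carries for progress bars.

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer

def corpus():
    # Any Python iterator/generator works; items are pulled in batches,
    # so other Python threads can run between refills of the buffer.
    for i in range(1_000):
        yield f"line number {i}"

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
trainer = BpeTrainer(special_tokens=["[UNK]"])

# A generator has no len(), but passing `length` lets the progress bar
# report a total (the size hint described above).
tokenizer.train_from_iterator(corpus(), trainer=trainer, length=1_000)
```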
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/pretokenization.rs
use tokenizers as tk; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use super::{ DestroyPtr, PyNormalizedString, PyNormalizedStringRefMut, RefMutContainer, RefMutGuard, }; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use crate::token::PyToken; use tk::{OffsetReferential, OffsetType, Offsets, PreTokenizedString, Token}; fn split(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`split` expect a callable with the signature: \ `fn(index: int, normalized: NormalizedString) -> List[NormalizedString]`", )) } else { ToPyResult(pretok.split(|i, normalized| { let output = func.call((i, PyNormalizedString::from(normalized)), None)?; Ok(output .extract::<Vec<PyNormalizedString>>()? .into_iter() .map(tk::NormalizedString::from)) })) .into() } } fn normalize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`normalize` expect a callable with the signature: \ `fn(normalized: NormalizedString)`", )) } else { ToPyResult(pretok.normalize(|normalized| { let norm = PyNormalizedStringRefMut::new(normalized); func.call((norm.get(),), None)?; Ok(()) })) .into() } } fn tokenize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`tokenize` expect a callable with the signature: \ `fn(str) -> List[Token]`", )) } else { ToPyResult(pretok.tokenize(|normalized| { let output = func.call((normalized.get(),), None)?; Ok(output .extract::<&PyList>()? .into_iter() .map(|obj| Ok(Token::from(obj.extract::<PyToken>()?))) .collect::<PyResult<Vec<_>>>()?) })) .into() } } /// This is an enum #[derive(Clone)] pub struct PyOffsetReferential(OffsetReferential); impl FromPyObject<'_> for PyOffsetReferential { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "original" => Ok(OffsetReferential::Original), "normalized" => Ok(OffsetReferential::Normalized), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetReferential, expected one of `original, normalized`", )), }?)) } } #[derive(Clone)] pub struct PyOffsetType(OffsetType); impl FromPyObject<'_> for PyOffsetType { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "byte" => Ok(OffsetType::Byte), "char" => Ok(OffsetType::Char), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetType, expected one of `byte, char`", )), }?)) } } type PySplit = (String, Offsets, Option<Vec<PyToken>>); fn get_splits( pretok: &PreTokenizedString, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { pretok .get_splits(offset_referential.0, offset_type.0) .into_iter() .map(|(s, o, t)| { ( s.to_owned(), o, t.as_ref() .map(|tokens| tokens.iter().map(|t| t.clone().into()).collect()), ) }) .collect() } fn to_encoding( pretok: &PreTokenizedString, type_id: u32, word_idx: Option<u32>, ) -> PyResult<PyEncoding> { Ok(ToPyResult( pretok .clone() .into_encoding(word_idx, type_id, tk::OffsetType::Char), ) .into_py()? .into()) } /// PreTokenizedString /// /// Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the /// underlying string, while keeping track of the alignment information (offsets). /// /// The PreTokenizedString manages what we call `splits`. Each split represents a substring /// which is a subpart of the original string, with the relevant offsets and tokens. 
/// /// When calling one of the methods used to modify the PreTokenizedString (namely one of /// `split`, `normalize` or `tokenize), only the `splits` that don't have any associated /// tokens will get modified. /// /// Args: /// sequence: str: /// The string sequence used to initialize this PreTokenizedString #[pyclass(module = "tokenizers", name = "PreTokenizedString")] #[pyo3(text_signature = "(self, sequence)")] pub struct PyPreTokenizedString { pub(crate) pretok: tk::PreTokenizedString, } impl From<PreTokenizedString> for PyPreTokenizedString { fn from(pretok: PreTokenizedString) -> Self { Self { pretok } } } impl From<PyPreTokenizedString> for PreTokenizedString { fn from(pretok: PyPreTokenizedString) -> Self { pretok.pretok } } #[pymethods] impl PyPreTokenizedString { #[new] fn new(s: &str) -> Self { PreTokenizedString::from(s).into() } /// Split the PreTokenizedString using the given `func` /// /// Args: /// func: Callable[[index, NormalizedString], List[NormalizedString]]: /// The function used to split each underlying split. /// It is expected to return a list of `NormalizedString`, that represent the new /// splits. If the given `NormalizedString` does not need any splitting, we can /// just return it directly. /// In order for the offsets to be tracked accurately, any returned `NormalizedString` /// should come from calling either `.split` or `.slice` on the received one. #[pyo3(text_signature = "(self, func)")] fn split(&mut self, func: &PyAny) -> PyResult<()> { split(&mut self.pretok, func) } /// Normalize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[NormalizedString], None]: /// The function used to normalize each underlying split. This function /// does not need to return anything, just calling the methods on the provided /// NormalizedString allow its modification. #[pyo3(text_signature = "(self, func)")] fn normalize(&mut self, func: &PyAny) -> PyResult<()> { normalize(&mut self.pretok, func) } /// Tokenize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[str], List[Token]]: /// The function used to tokenize each underlying split. This function must return /// a list of Token generated from the input str. #[pyo3(text_signature = "(self, func)")] fn tokenize(&mut self, func: &PyAny) -> PyResult<()> { tokenize(&mut self.pretok, func) } /// Return an Encoding generated from this PreTokenizedString /// /// Args: /// type_id: int = 0: /// The type_id to be used on the generated Encoding. /// /// word_idx: Optional[int] = None: /// An optional word index to be used for each token of this Encoding. If provided, /// all the word indices in the generated Encoding will use this value, instead /// of the one automatically tracked during pre-tokenization. /// /// Returns: /// An Encoding #[pyo3(signature = (type_id = 0, word_idx = None))] #[pyo3(text_signature = "(self, type_id=0, word_idx=None)")] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { to_encoding(&self.pretok, type_id, word_idx) } /// Get the splits currently managed by the PreTokenizedString /// /// Args: /// offset_referential: :obj:`str` /// Whether the returned splits should have offsets expressed relative /// to the original string, or the normalized one. choices: "original", "normalized". /// /// offset_type: :obj:`str` /// Whether the returned splits should have offsets expressed in bytes or chars. /// When slicing an str, we usually want to use chars, which is the default value. 
/// Now in some cases it might be interesting to get these offsets expressed in bytes, /// so it is possible to change this here. /// choices: "char", "bytes" /// /// Returns /// A list of splits #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] #[pyo3(text_signature = "(self, offset_referential=\"original\", offset_type=\"char\")")] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { get_splits(&self.pretok, offset_referential, offset_type) } } #[pyclass(module = "tokenizers", name = "PreTokenizedString")] #[derive(Clone)] pub struct PyPreTokenizedStringRefMut { inner: RefMutContainer<PreTokenizedString>, } impl DestroyPtr for PyPreTokenizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyPreTokenizedStringRefMut { pub fn new(pretok: &mut tk::PreTokenizedString) -> RefMutGuard<Self> { // SAFETY: This is safe because we return a RefMutGuard here. // The compiler will make sure the &mut stays valid as necessary. RefMutGuard::new(Self { inner: RefMutContainer::new(pretok), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err( "Cannot use a PreTokenizedStringRefMut outside `pre_tokenize`", ) } } #[pymethods] impl PyPreTokenizedStringRefMut { fn split(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| split(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn normalize(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| normalize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn tokenize(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| tokenize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = (type_id = 0, word_idx = None))] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { self.inner .map(|pretok| to_encoding(pretok, type_id, word_idx)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> PyResult<Vec<PySplit>> { self.inner .map(|pretok| get_splits(pretok, offset_referential, offset_type)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error) } }
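On the Python side, this is the object a custom pre-tokenizer receives and manipulates. A short, hedged sketch of the documented methods (`split`, `normalize`, `get_splits`), using a trivial whitespace split purely for illustration:

```python
from tokenizers import NormalizedString, PreTokenizedString

pretok = PreTokenizedString("Hello there friend")

# split: func(index, NormalizedString) -> List[NormalizedString].
# Per the docstring above, returned pieces should come from splitting or
# slicing the received NormalizedString so offsets stay accurate.
def whitespace_split(i: int, normalized: NormalizedString):
    return normalized.split(" ", "removed")

pretok.split(whitespace_split)

# normalize: func(NormalizedString) -> None, mutates each split in place
pretok.normalize(lambda n: n.lowercase())

# Inspect the current splits: (piece, (start, end), tokens_or_None)
for piece, offsets, tokens in pretok.get_splits(
    offset_referential="original", offset_type="char"
):
    print(piece, offsets, tokens)
```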
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/regex.rs
use onig::Regex; use pyo3::exceptions; use pyo3::prelude::*; /// Instantiate a new Regex with the given pattern #[pyclass(module = "tokenizers", name = "Regex")] #[pyo3(text_signature = "(self, pattern)")] pub struct PyRegex { pub inner: Regex, pub pattern: String, } #[pymethods] impl PyRegex { #[new] fn new(s: &str) -> PyResult<Self> { Ok(Self { inner: Regex::new(s) .map_err(|e| exceptions::PyException::new_err(e.description().to_owned()))?, pattern: s.to_owned(), }) } }
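`Regex` is how an Oniguruma pattern is handed to any component accepting a `Pattern`; for instance, the `Replace` normalizer bound earlier takes either a plain string or a `tokenizers.Regex`. A small hedged sketch (the expected output is indicative):

```python
from tokenizers import Regex
from tokenizers.normalizers import Replace

# A plain str pattern is matched literally, while Regex compiles the
# pattern with the onig engine wrapped above.
collapse_spaces = Replace(Regex(r"\s+"), " ")
print(collapse_spaces.normalize_str("too   many \t spaces"))  # expected: "too many spaces"
```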
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/normalization.rs
use super::regex::PyRegex; use super::{DestroyPtr, RefMutContainer, RefMutGuard}; use crate::error::ToPyResult; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::normalizer::{char_to_bytes, NormalizedString, Range, SplitDelimiterBehavior}; use tk::pattern::Pattern; /// Represents a Pattern as used by `NormalizedString` #[derive(Clone, FromPyObject)] pub enum PyPattern<'p> { #[pyo3(annotation = "str")] Str(&'p str), #[pyo3(annotation = "tokenizers.Regex")] Regex(Py<PyRegex>), // TODO: Add the compatibility for Fn(char) -> bool } impl Pattern for PyPattern<'_> { fn find_matches(&self, inside: &str) -> tk::Result<Vec<(tk::Offsets, bool)>> { match self { PyPattern::Str(s) => { let mut chars = s.chars(); if let (Some(c), None) = (chars.next(), chars.next()) { c.find_matches(inside) } else { s.find_matches(inside) } } PyPattern::Regex(r) => { Python::with_gil(|py| (&r.borrow(py).inner).find_matches(inside)) } } } } impl From<PyPattern<'_>> for tk::normalizers::replace::ReplacePattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } impl From<PyPattern<'_>> for tk::pre_tokenizers::split::SplitPattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } #[derive(Debug, Clone, FromPyObject)] pub enum PyRange<'s> { #[pyo3(annotation = "int")] Single(isize), #[pyo3(annotation = "Tuple[uint, uint]")] Range(usize, usize), #[pyo3(annotation = "slice")] Slice(&'s PySlice), } impl PyRange<'_> { pub fn to_range(&self, max_len: usize) -> PyResult<std::ops::Range<usize>> { match self { PyRange::Single(i) => { if i.is_negative() { let i = -i as usize; if i > max_len { Err(exceptions::PyValueError::new_err(format!( "{} is bigger than max len", i ))) } else { Ok(max_len - i..max_len - i + 1) } } else { let i = *i as usize; Ok(i..i + 1) } } PyRange::Range(s, e) => Ok(*s..*e), PyRange::Slice(s) => { let r = s.indices(max_len as std::os::raw::c_long)?; Ok(r.start as usize..r.stop as usize) } } } } #[derive(Clone)] pub struct PySplitDelimiterBehavior(pub SplitDelimiterBehavior); impl FromPyObject<'_> for PySplitDelimiterBehavior { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "removed" => Ok(SplitDelimiterBehavior::Removed), "isolated" => Ok(SplitDelimiterBehavior::Isolated), "merged_with_previous" => Ok(SplitDelimiterBehavior::MergedWithPrevious), "merged_with_next" => Ok(SplitDelimiterBehavior::MergedWithNext), "contiguous" => Ok(SplitDelimiterBehavior::Contiguous), _ => Err(exceptions::PyValueError::new_err( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, merged_with_previous, merged_with_next, contiguous`", )), }?)) } } impl From<PySplitDelimiterBehavior> for SplitDelimiterBehavior { fn from(v: PySplitDelimiterBehavior) -> Self { v.0 } } fn filter(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`filter` expect a callable with the signature: `fn(char) -> bool`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.filter(|c| { func.call1((c.to_string(),)) .expect(err) .extract() .expect(err) }); Ok(()) } } fn for_each(normalized: &NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`for_each` expect a callable with the signature: 
`fn(char)`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.for_each(|c| { func.call1((c.to_string(),)).expect(err); }); Ok(()) } } fn map(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`map` expect a callable with the signature: `fn(char) -> char`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.map(|c| { let c: &str = func .call1((c.to_string(),)) .expect(err) .extract() .expect(err); c.chars().next().expect(err) }); Ok(()) } } fn slice( normalized: &NormalizedString, range: &PyRange<'_>, ) -> PyResult<Option<PyNormalizedString>> { let n_char = normalized.len(); let char_range = range.to_range(n_char)?; Ok( char_to_bytes(normalized.get(), char_range).and_then(|bytes_range| { normalized .slice(Range::Normalized(bytes_range)) .map(|n| n.into()) }), ) } /// NormalizedString /// /// A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. /// While making all the requested modifications, it keeps track of the alignment information /// between the two versions of the string. /// /// Args: /// sequence: str: /// The string sequence used to initialize this NormalizedString #[pyclass(module = "tokenizers", name = "NormalizedString")] #[derive(Clone)] pub struct PyNormalizedString { pub(crate) normalized: NormalizedString, } #[pymethods] impl PyNormalizedString { #[new] fn new(s: &str) -> Self { NormalizedString::from(s).into() } /// The normalized part of the string #[getter] fn get_normalized(&self) -> &str { self.normalized.get() } #[getter] fn get_original(&self) -> &str { self.normalized.get_original() } /// Runs the NFD normalization #[pyo3(text_signature = "(self)")] fn nfd(&mut self) { self.normalized.nfd(); } /// Runs the NFKD normalization #[pyo3(text_signature = "(self)")] fn nfkd(&mut self) { self.normalized.nfkd(); } /// Runs the NFC normalization #[pyo3(text_signature = "(self)")] fn nfc(&mut self) { self.normalized.nfc(); } /// Runs the NFKC normalization #[pyo3(text_signature = "(self)")] fn nfkc(&mut self) { self.normalized.nfkc(); } /// Lowercase the string #[pyo3(text_signature = "(self)")] fn lowercase(&mut self) { self.normalized.lowercase(); } /// Uppercase the string #[pyo3(text_signature = "(self)")] fn uppercase(&mut self) { self.normalized.uppercase(); } /// Prepend the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn prepend(&mut self, s: &str) { self.normalized.prepend(s); } /// Append the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn append(&mut self, s: &str) { self.normalized.append(s); } /// Strip the left of the string #[pyo3(text_signature = "(self)")] fn lstrip(&mut self) { self.normalized.lstrip(); } /// Strip the right of the string #[pyo3(text_signature = "(self)")] fn rstrip(&mut self) { self.normalized.rstrip(); } /// Strip both ends of the string #[pyo3(text_signature = "(self)")] fn strip(&mut self) { self.normalized.strip(); } /// Clears the string #[pyo3(text_signature = "(self)")] fn clear(&mut self) { self.normalized.clear(); } /// Slice the string using the given range #[pyo3(text_signature = "(self, range)")] fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } /// Filter each character of the string using the given func #[pyo3(text_signature = "(self, func)")] fn filter(&mut self, func: &PyAny) -> PyResult<()> { filter(&mut self.normalized, func) } /// Calls the given function for each character of the 
string #[pyo3(text_signature = "(self, func)")] fn for_each(&self, func: &PyAny) -> PyResult<()> { for_each(&self.normalized, func) } /// Calls the given function for each character of the string /// /// Replaces each character of the string using the returned value. Each /// returned value **must** be a str of length 1 (ie a character). #[pyo3(text_signature = "(self, func)")] fn map(&mut self, func: &PyAny) -> PyResult<()> { map(&mut self.normalized, func) } /// Split the NormalizedString using the given pattern and the specified behavior /// /// Args: /// pattern: Pattern: /// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` /// /// behavior: SplitDelimiterBehavior: /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// Returns: /// A list of NormalizedString, representing each split #[pyo3(text_signature = "(self, pattern, behavior)")] fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult(self.normalized.split(pattern, behavior.into())) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } /// Replace the content of the given pattern with the provided content /// /// Args: /// pattern: Pattern: /// A pattern used to match the string. Usually a string or a Regex /// /// content: str: /// The content to be used as replacement #[pyo3(text_signature = "(self, pattern, content)")] fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult(self.normalized.replace(pattern, content)).into() } fn __repr__(&self) -> String { format!( r#"NormalizedString(original="{}", normalized="{}")"#, self.normalized.get_original(), self.normalized.get() ) } fn __str__(&self) -> &str { self.normalized.get() } fn __getitem__(&self, range: PyRange<'_>) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } } impl From<NormalizedString> for PyNormalizedString { fn from(normalized: NormalizedString) -> Self { Self { normalized } } } impl From<PyNormalizedString> for NormalizedString { fn from(normalized: PyNormalizedString) -> Self { normalized.normalized } } #[pyclass(module = "tokenizers", name = "NormalizedStringRefMut")] #[derive(Clone)] pub struct PyNormalizedStringRefMut { inner: RefMutContainer<NormalizedString>, } impl DestroyPtr for PyNormalizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyNormalizedStringRefMut { pub fn new(normalized: &mut NormalizedString) -> RefMutGuard<Self> { RefMutGuard::new(Self { inner: RefMutContainer::new(normalized), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err("Cannot use a NormalizedStringRefMut outside `normalize`") } /// Provides a way to access a reference to the underlying NormalizedString pub fn map_as_ref<F: FnOnce(&NormalizedString) -> U, U>(&self, f: F) -> PyResult<U> { self.inner .map(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } /// Provides a way to access a mutable reference to the underlying NormalizedString pub fn map_as_mut<F: FnOnce(&mut NormalizedString) -> U, U>(&mut self, f: F) -> PyResult<U> { self.inner .map_mut(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } } #[pymethods] impl PyNormalizedStringRefMut { #[getter] fn get_normalized(&self) -> PyResult<String> { self.inner .map(|n| n.get().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } #[getter] fn get_original(&self) -> PyResult<String> { 
self.inner .map(|n| n.get_original().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } fn nfd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lowercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lowercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn uppercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.uppercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn prepend(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.prepend(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn append(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.append(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn rstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.rstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn strip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.strip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn clear(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.clear(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { self.inner .map(|n| slice(n, &range)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)? } fn filter(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| filter(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn for_each(&self, func: &PyAny) -> PyResult<()> { self.inner .map(|n| for_each(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn map(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| map(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult( self.inner .map_mut(|n| n.split(pattern, behavior.into())) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult( self.inner .map_mut(|n| n.replace(pattern, content)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into() } }
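To round this off, a hedged sketch of the `NormalizedString` API documented above, showing how the normalized view evolves while the original string stays reachable (outputs are indicative):

```python
from tokenizers import NormalizedString

n = NormalizedString("  Héllò World  ")
n.nfd()        # unicode decomposition, tracked against the original
n.lowercase()
n.strip()      # strips both ends (lstrip/rstrip also exist)

print(n.normalized)  # the normalized view, e.g. "héllò world" (accents decomposed)
print(n.original)    # the untouched original: "  Héllò World  "

# __getitem__ accepts an int, a (start, end) pair, or a slice, in char space
first = n[0:5]       # a new NormalizedString over the first five chars, or None

# split(pattern, behavior) returns a list of NormalizedString
parts = n.split(" ", "removed")
```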
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/.cargo/config.toml
[target.x86_64-apple-darwin]
rustflags = [
    "-C", "link-arg=-undefined",
    "-C", "link-arg=dynamic_lookup",
    "-C", "link-arg=-mmacosx-version-min=10.11",
]

[target.aarch64-apple-darwin]
rustflags = [
    "-C", "link-arg=-undefined",
    "-C", "link-arg=dynamic_lookup",
    "-C", "link-arg=-mmacosx-version-min=10.11",
]
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/CHANGELOG.md
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.13.2]

- Python only changes

## [0.13.1]

- [#1072] Fixing Roberta type ids.

## [0.13.0]

- [#1009] `unstable_wasm` feature to support building on Wasm (it's unstable!)
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible

Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.

## [0.12.1]

- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520

## [0.12.0] YANKED

Bump minor version because of a breaking change.

- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#961] Added link for Ruby port of `tokenizers`
- [#960] Feature gate for `cli` and its `clap` dependency

## [0.11.3]

- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.

## [0.11.2]

- [#884] Fixing bad deserialization following inclusion of a default for Punctuation

## [0.11.1]

- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams

## [0.11.0]

### Fixed

- [#236]: Fix a bug with offsets being shifted when there are sub-sequences (Usually with special tokens and/or added tokens in the sequence).
- [#286]: Fix various crashes when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
- [#363]: Fix panic from unwrapping `File::open` in `count_words`

### Changed

- [#234]: Completely changed the alignment mappings available on `Encoding`. Previous mappings were misleading and only providing offsets. New ones provide methods to easily convert between `char` or `word` (input space) and `token` (output space)
- [#236]: `AddedToken` with special options like `rstrip` will keep the matched whitespaces in the textual representation of the token, exposed in `tokens` on the `Encoding`. The ID stays the same as usual. This fixes the offsets for said tokens.
- [#236]: Offsets are now converted back to the original referential before we merge the sub-sequences together and then do the post-processing. This also fixes some offsets bugs.
- [#236]: ByteLevel PostProcessor now uses the `add_prefix_space` attribute to determine how to trim offsets.
- Improved `TruncationError` to handle cases where provided max length is too low.
- [#249]: `encode` and `encode_batch` input has been greatly improved, and it now also accepts pre-tokenized inputs.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file - [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 - [#309]: Improved the management of the additional vocabulary. This introduces an option `normalized`, controlling whether a token should be extracted from the normalized version of the input text. - [#330]: BertNormalizer now keeps the same behavior than the original implementation when `strip_accents` is not specified. - [#355]: Tokenizer does not use any dynamic dispatch anymore. - [#377]: Use byte offsets everywhere (instead of the char offsets) ### Added - [#236]: RobertaProcessing is now also taking care of trimming offsets, and works just as ByteLevel on this front. - [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...) using serde. It is now easy to save/load an entire tokenizer. - [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure activation of the Tensor Cores, while ensuring padding to a multiple of 8. - [#298]: Ability to get the currently set truncation/padding params - [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment variable. - [#403]: Add `TemplateProcessing` `PostProcessor`. ### How to migrate - Replace any `XXX_to_YYY_offsets()` method call by any of the new ones. - Specify the `add_prefix_space` and `trim_offsets` options on `RobertaProcessing` if you don't want the offsets trimmed out. - Any custom `PostProcessor` now handles offsets relative to the original string (as opposed to the normalized one). ## [0.10.1] ### Fixed - [#226]: Fix the word indexes when there are special tokens ## [0.10.0] ### Changed - [#222]: All Tokenizer's subparts must now be `Send + Sync` ### Added - [#208]: Ability to retrieve the vocabulary from the `Tokenizer` & `Model` ### Fixed - [#205]: Trim the decoded string in `BPEDecoder` - [b770f36]: Fix a bug with added tokens generated IDs ## [0.9.0] ### Changed - Only one progress bar while reading files during training. This is better for use-cases with a high number of files as it avoids having too many progress bars on screen. Also avoids reading the size of each file before starting to actually read these files, as this process could take really long. - [#190]: Improved BPE and WordPiece builders - [#193]: `encode` and `encode_batch` now take a new argument, specifying whether we should add the special tokens - [#197]: The `NormalizedString` has been removed from the `Encoding`. It is now possible to retrieve it by calling `normalize` on the `Tokenizer`. This brings a reduction of 70% of the memory footprint - [#197]: The `NormalizedString` API has been improved. It is now possible to retrieve parts of both strings using both "normalized" or "original" offsets - [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore - `AddedToken` are now used for both `add_special_tokens` and `add_tokens`. Also, these AddedToken have more options to allow various behaviors. ### Added - [#188]: `impl PostProcessor for ByteLevel`: Handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token - More alignment mappings on the `Encoding`. 
- `post_process` can be called on the `Tokenizer` ### Fixed - [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE: - when `add_prefix_space` is activated - [#156]: when a Unicode character gets split-up in multiple byte-level characters - Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded. - [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not advised, but that's not the question) ### How to migrate - Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. ## [0.8.0] ### Changed - [#165]: Big improvements in speed for BPE (Both training and tokenization) ### Fixed - [#163]: Do not open all files directly while training - [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got split up in multiple bytes - [#174]: The `LongestFirst` truncation strategy had a bug [#1072]: https://github.com/huggingface/tokenizers/pull/1072 [#956]: https://github.com/huggingface/tokenizers/pull/956 [#1008]: https://github.com/huggingface/tokenizers/pull/1008 [#1009]: https://github.com/huggingface/tokenizers/pull/1009 [#1047]: https://github.com/huggingface/tokenizers/pull/1047 [#1055]: https://github.com/huggingface/tokenizers/pull/1055 [#1051]: https://github.com/huggingface/tokenizers/pull/1051 [#1052]: https://github.com/huggingface/tokenizers/pull/1052 [#938]: https://github.com/huggingface/tokenizers/pull/938 [#939]: https://github.com/huggingface/tokenizers/pull/939 [#952]: https://github.com/huggingface/tokenizers/pull/952 [#954]: https://github.com/huggingface/tokenizers/pull/954 [#961]: https://github.com/huggingface/tokenizers/pull/961 [#960]: https://github.com/huggingface/tokenizers/pull/960 [#919]: https://github.com/huggingface/tokenizers/pull/919 [#916]: https://github.com/huggingface/tokenizers/pull/916 [#884]: https://github.com/huggingface/tokenizers/pull/884 [#882]: https://github.com/huggingface/tokenizers/pull/882 [#868]: https://github.com/huggingface/tokenizers/pull/868 [#860]: https://github.com/huggingface/tokenizers/pull/860 [#403]: https://github.com/huggingface/tokenizers/pull/403 [#377]: https://github.com/huggingface/tokenizers/pull/377 [#355]: https://github.com/huggingface/tokenizers/pull/355 [#363]: https://github.com/huggingface/tokenizers/pull/363 [#330]: https://github.com/huggingface/tokenizers/pull/330 [#311]: https://github.com/huggingface/tokenizers/pull/311 [#309]: https://github.com/huggingface/tokenizers/pull/309 [#298]: https://github.com/huggingface/tokenizers/pull/298 [#289]: https://github.com/huggingface/tokenizers/pull/289 [#286]: https://github.com/huggingface/tokenizers/pull/286 [#280]: https://github.com/huggingface/tokenizers/pull/280 [#276]: https://github.com/huggingface/tokenizers/pull/276 [#272]: https://github.com/huggingface/tokenizers/pull/272 [#249]: https://github.com/huggingface/tokenizers/pull/249 [b770f36]: https://github.com/huggingface/tokenizers/commit/b770f364280af33efeffea8f0003102cda8cf1b7 [#236]: https://github.com/huggingface/tokenizers/pull/236 [#234]: https://github.com/huggingface/tokenizers/pull/234 [#226]: https://github.com/huggingface/tokenizers/pull/226 [#222]: https://github.com/huggingface/tokenizers/pull/222 [#208]: https://github.com/huggingface/tokenizers/pull/208 [#205]: https://github.com/huggingface/tokenizers/issues/205 [#197]: https://github.com/huggingface/tokenizers/pull/197 [#193]: https://github.com/huggingface/tokenizers/pull/193 
[#190]: https://github.com/huggingface/tokenizers/pull/190 [#188]: https://github.com/huggingface/tokenizers/pull/188 [#175]: https://github.com/huggingface/tokenizers/issues/175 [#174]: https://github.com/huggingface/tokenizers/issues/174 [#165]: https://github.com/huggingface/tokenizers/pull/165 [#163]: https://github.com/huggingface/tokenizers/issues/163 [#156]: https://github.com/huggingface/tokenizers/pull/156
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/README.md
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg"> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> <a href="https://docs.rs/tokenizers/"> <img alt="Doc" src="https://docs.rs/tokenizers/badge.svg"> </a> </p> <br> The core of `tokenizers`, written in Rust. Provides an implementation of today's most used tokenizers, with a focus on performance and versatility. ## What is a Tokenizer A Tokenizer works as a pipeline, it processes some raw text as input and outputs an `Encoding`. The various steps of the pipeline are: 1. The `Normalizer`: in charge of normalizing the text. Common examples of normalization are the [unicode normalization standards](https://unicode.org/reports/tr15/#Norm_Forms), such as `NFD` or `NFKC`. More details about how to use the `Normalizers` are available on the [Hugging Face blog](https://huggingface.co/docs/tokenizers/components#normalizers) 2. The `PreTokenizer`: in charge of creating initial words splits in the text. The most common way of splitting text is simply on whitespace. 3. The `Model`: in charge of doing the actual tokenization. An example of a `Model` would be `BPE` or `WordPiece`. 4. The `PostProcessor`: in charge of post-processing the `Encoding` to add anything relevant that, for example, a language model would need, such as special tokens. ### Loading a pretrained tokenizer from the Hub ```rust use tokenizers::tokenizer::{Result, Tokenizer}; fn main() -> Result<()> { # #[cfg(feature = "http")] # { let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?; let encoding = tokenizer.encode("Hey there!", false)?; println!("{:?}", encoding.get_tokens()); # } Ok(()) } ``` ### Deserialization and tokenization example ```rust use tokenizers::tokenizer::{Result, Tokenizer, EncodeInput}; use tokenizers::models::bpe::BPE; fn main() -> Result<()> { let bpe_builder = BPE::from_file("./path/to/vocab.json", "./path/to/merges.txt"); let bpe = bpe_builder .dropout(0.1) .unk_token("[UNK]".into()) .build()?; let mut tokenizer = Tokenizer::new(bpe); let encoding = tokenizer.encode("Hey there!", false)?; println!("{:?}", encoding.get_tokens()); Ok(()) } ``` ### Training and serialization example ```rust use tokenizers::decoders::DecoderWrapper; use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; use tokenizers::normalizers::{strip::Strip, unicode::NFC, utils::Sequence, NormalizerWrapper}; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::pre_tokenizers::PreTokenizerWrapper; use tokenizers::processors::PostProcessorWrapper; use tokenizers::{AddedToken, Model, Result, TokenizerBuilder}; use std::path::Path; fn main() -> Result<()> { let vocab_size: usize = 100; let mut trainer = BpeTrainerBuilder::new() .show_progress(true) .vocab_size(vocab_size) .min_frequency(0) .special_tokens(vec![ AddedToken::from(String::from("<s>"), true), AddedToken::from(String::from("<pad>"), true), AddedToken::from(String::from("</s>"), true), AddedToken::from(String::from("<unk>"), true), AddedToken::from(String::from("<mask>"), true), ]) .build(); let mut tokenizer = TokenizerBuilder::new() .with_model(BPE::default()) .with_normalizer(Some(Sequence::new(vec![ Strip::new(true, true).into(), NFC.into(), ]))) 
.with_pre_tokenizer(Some(ByteLevel::default())) .with_post_processor(Some(ByteLevel::default())) .with_decoder(Some(ByteLevel::default())) .build()?; let pretty = false; tokenizer .train_from_files( &mut trainer, vec!["path/to/vocab.txt".to_string()], )? .save("tokenizer.json", pretty)?; Ok(()) } ``` ## Additional information - tokenizers is designed to leverage CPU parallelism when possible. The level of parallelism is determined by the total number of core/threads your CPU provides but this can be tuned by setting the `RAYON_RS_NUM_THREADS` environment variable. As an example setting `RAYON_RS_NUM_THREADS=4` will allocate a maximum of 4 threads. **_Please note this behavior may evolve in the future_** ## Features **progressbar**: The progress bar visualization is enabled by default. It might be disabled if compilation for certain targets is not supported by the [termios](https://crates.io/crates/termios) dependency of the [indicatif](https://crates.io/crates/indicatif) progress bar.
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/README.tpl
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg"> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> <a href="https://docs.rs/tokenizers/"> <img alt="Doc" src="https://docs.rs/tokenizers/badge.svg"> </a> </p> <br> {{readme}}
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/rust-toolchain
stable
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/Cargo.toml
[package] authors = ["Anthony MOI <[email protected]>", "Nicolas Patry <[email protected]>"] edition = "2018" name = "tokenizers" version = "0.13.3" homepage = "https://github.com/huggingface/tokenizers" repository = "https://github.com/huggingface/tokenizers" documentation = "https://docs.rs/tokenizers/" license = "Apache-2.0" keywords = ["tokenizer", "NLP", "huggingface", "BPE", "WordPiece"] readme = "./README.md" description = """ Provides an implementation of today's most used tokenizers, with a focus on performances and versatility. """ exclude = [ "rust-toolchain", "target/*", "Cargo.lock", "benches/*.txt", "benches/*.json", "data/*" ] [lib] name = "tokenizers" path = "src/lib.rs" bench = false [[bin]] name = "cli" path = "src/cli.rs" bench = false required-features = ["cli"] [[bench]] name = "bpe_benchmark" harness = false [[bench]] name = "bert_benchmark" harness = false [[bench]] name = "layout_benchmark" harness = false [[bench]] name = "unigram_benchmark" harness = false [dependencies] lazy_static = "1.4" rand = "0.8" onig = { version = "6.0", default-features = false, optional = true } regex = "1.8" regex-syntax = "0.7" rayon = "1.7" rayon-cond = "0.1" serde = { version = "1.0", features = [ "derive" ] } serde_json = "1.0" clap = { version = "4.0", features=["derive"], optional = true } unicode-normalization-alignments = "0.1" unicode_categories = "0.1" unicode-segmentation = "1.10" indicatif = {version = "0.15", optional = true} itertools = "0.9" log = "0.4" derive_builder = "0.12" spm_precompiled = "0.1" dirs = { version = "4.0", optional = true } reqwest = { version = "0.11", optional = true } cached-path = { version = "0.6", optional = true } aho-corasick = "0.7" paste = "1.0.6" macro_rules_attribute = "0.1.2" thiserror = "1.0.30" fancy-regex = { version = "0.10", optional = true} getrandom = { version = "0.2.6" } esaxx-rs = { version = "0.1", default-features = false, features=[]} monostate = "0.1.5" [features] default = ["progressbar", "http", "cli", "onig", "esaxx_fast"] esaxx_fast = ["esaxx-rs/cpp"] progressbar = ["indicatif"] http = ["reqwest", "cached-path", "dirs"] cli = ["clap"] unstable_wasm = ["fancy-regex", "getrandom/js"] [dev-dependencies] criterion = "0.4" tempfile = "3.1" assert_approx_eq = "1.1" [profile.release] lto = "fat"
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/tokenizers/Makefile
DATA_DIR = data
BENCHMARK_DIR = benches
TESTS_DIR = tests

dir_guard=@mkdir -p $(@D)

SHARED_RESOURCES = $(DATA_DIR)/gpt2-vocab.json $(DATA_DIR)/gpt2-merges.txt $(DATA_DIR)/bert-base-uncased-vocab.txt $(DATA_DIR)/big.txt $(DATA_DIR)/small.txt
BENCHMARK_RESOURCES = $(SHARED_RESOURCES)
TESTS_RESOURCES = $(SHARED_RESOURCES) $(DATA_DIR)/unigram.json $(DATA_DIR)/unigram_wagahaiwa_nekodearu.txt $(DATA_DIR)/albert-base-v1-tokenizer.json $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json

.PHONY : build
build :
	cargo build --all-targets

.PHONY : release
release :
	cargo build --release

.PHONY : format
format :
	cargo fmt --

.PHONY : lint
lint :
	cargo fmt -- --check
	cargo fmt -- $(BENCHMARK_DIR)/*.rs --check
	cargo clippy --all-targets --all-features -- -D warnings

.PHONY : test
test : $(TESTS_RESOURCES)
	cargo test

.PHONY : doc
doc :
	cargo doc

.PHONY : publish
publish :
	cargo publish

.PHONY : all-checks
all-checks : lint test doc

.PHONY : bench
bench : $(BENCHMARK_RESOURCES)
	cargo bench -- --verbose

$(DATA_DIR)/gpt2-% :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-$* -O $@

$(DATA_DIR)/bert-% :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-$* -O $@

$(DATA_DIR)/unigram% :
	$(dir_guard)
	wget https://huggingface.co/Narsil/small/raw/main/unigram$* -O $@

$(DATA_DIR)/albert-base-v1-tokenizer.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json -O $@

$(DATA_DIR)/big.txt :
	$(dir_guard)
	wget https://norvig.com/big.txt -O $@

$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
	head -100 $(DATA_DIR)/big.txt > $@

$(DATA_DIR)/roberta.json :
	$(dir_guard)
	wget https://huggingface.co/Narsil/small/raw/main/roberta.json -O $@

$(DATA_DIR)/tokenizer-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@

$(DATA_DIR)/bert-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/examples/serialization.rs
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::{AddedToken, Tokenizer};

fn main() {
    let start = std::time::Instant::now();
    let mut tokenizer = Tokenizer::new(WordPiece::default());

    // Mix special and not special
    // You can make sure ids are in order, and special status is correct.
    let tokens: Vec<_> = (0..120_000)
        .map(|i| AddedToken::from(format!("[SPECIAL_{}]", i), i % 2 == 0))
        .collect();
    tokenizer.add_tokens(&tokens);
    tokenizer.save("_tok.json", true).unwrap();
    println!("Save took {:?}", start.elapsed());
    let start = std::time::Instant::now();
    let _tok = Tokenizer::from_file("_tok.json").unwrap();
    println!("Took {:?}", start.elapsed());
    std::fs::remove_file("_tok.json").unwrap();
}
0
hf_public_repos/tokenizers/tokenizers/examples
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/README.md
<div align="center"> <h1><code>wasm-pack-template</code></h1> <strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong> <p> <a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This is an example project showing off a very basic use case for `wasm` tokenizers usage. [**📚 Read this template tutorial! 📚**][template-docs] This template is designed for compiling Rust libraries into WebAssembly and publishing the resulting package to NPM. Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other templates and usages of `wasm-pack`. [tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html [template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html ## 🚴 Usage ### 🐑 Use `cargo generate` to Clone this Template [Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate) ``` cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project cd my-project ``` ### 🛠️ Build with `wasm-pack build` ``` wasm-pack build ``` ### 🔬 Test in Headless Browsers with `wasm-pack test` ``` wasm-pack test --headless --firefox ``` ### 🎁 Publish to NPM with `wasm-pack publish` ``` wasm-pack publish ``` ## 🔋 Batteries Included * [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating between WebAssembly and JavaScript. * [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook) for logging panic messages to the developer console. * [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized for small code size.
0
hf_public_repos/tokenizers/tokenizers/examples
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml
[package] name = "unstable_wasm" version = "0.1.0" authors = ["Nicolas Patry"] edition = "2018" [lib] crate-type = ["cdylib", "rlib"] [features] default = ["console_error_panic_hook"] [dependencies] wasm-bindgen = "0.2.63" # The `console_error_panic_hook` crate provides better debugging of panics by # logging them with `console.error`. This is great for development, but requires # all the `std::fmt` and `std::panicking` infrastructure, so isn't great for # code size when deploying. console_error_panic_hook = { version = "0.1.6", optional = true } # `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size # compared to the default allocator's ~10K. It is slower than the default # allocator, however. # # Unfortunately, `wee_alloc` requires nightly Rust when targeting wasm for now. wee_alloc = { version = "0.4.5", optional = true } tokenizers = { path = "../../", default-features=false, features = ["unstable_wasm"]} [dev-dependencies] wasm-bindgen-test = "0.3.13" [profile.release] # Tell `rustc` to optimize for small code size. opt-level = "s"
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/tests/web.rs
//! Test suite for the Web and headless browsers.

#![cfg(target_arch = "wasm32")]

extern crate wasm_bindgen_test;
use wasm_bindgen_test::*;

wasm_bindgen_test_configure!(run_in_browser);

#[wasm_bindgen_test]
fn pass() {
    assert_eq!(1 + 1, 2);
}
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/package.json
{ "name": "create-wasm-app", "version": "0.1.0", "description": "create an app to consume rust-generated wasm packages", "main": "index.js", "bin": { "create-wasm-app": ".bin/create-wasm-app.js" }, "scripts": { "build": "webpack --config webpack.config.js", "start": "NODE_OPTIONS=--openssl-legacy-provider webpack-dev-server" }, "repository": { "type": "git", "url": "git+https://github.com/rustwasm/create-wasm-app.git" }, "keywords": ["webassembly", "wasm", "rust", "webpack"], "author": "Ashley Williams <[email protected]>", "license": "(MIT OR Apache-2.0)", "bugs": { "url": "https://github.com/rustwasm/create-wasm-app/issues" }, "homepage": "https://github.com/rustwasm/create-wasm-app#readme", "devDependencies": { "copy-webpack-plugin": "^11.0.0", "webpack": "^5.75.0", "webpack-cli": "^5.0.1", "webpack-dev-server": "^4.10.0" }, "dependencies": { "unstable_wasm": "file:../pkg" } }
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/README.md
<div align="center"> <h1><code>create-wasm-app</code></h1> <strong>An <code>npm init</code> template for kick starting a project that uses NPM packages containing Rust-generated WebAssembly and bundles them with Webpack.</strong> <p> <a href="https://travis-ci.org/rustwasm/create-wasm-app"><img src="https://img.shields.io/travis/rustwasm/create-wasm-app.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="#usage">Usage</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This template is designed for depending on NPM packages that contain Rust-generated WebAssembly and using them to create a Website. * Want to create an NPM package with Rust and WebAssembly? [Check out `wasm-pack-template`.](https://github.com/rustwasm/wasm-pack-template) * Want to make a monorepo-style Website without publishing to NPM? Check out [`rust-webpack-template`](https://github.com/rustwasm/rust-webpack-template) and/or [`rust-parcel-template`](https://github.com/rustwasm/rust-parcel-template). ## 🚴 Usage ``` npm init wasm-app ``` ## 🔋 Batteries Included - `.gitignore`: ignores `node_modules` - `LICENSE-APACHE` and `LICENSE-MIT`: most Rust projects are licensed this way, so these are included for you - `README.md`: the file you are reading now! - `index.html`: a bare bones html document that includes the webpack bundle - `index.js`: example js file with a comment showing how to import and use a wasm pkg - `package.json` and `package-lock.json`: - pulls in devDependencies for using webpack: - [`webpack`](https://www.npmjs.com/package/webpack) - [`webpack-cli`](https://www.npmjs.com/package/webpack-cli) - [`webpack-dev-server`](https://www.npmjs.com/package/webpack-dev-server) - defines a `start` script to run `webpack-dev-server` - `webpack.config.js`: configuration file for bundling your js with webpack ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/index.js
import * as wasm from "unstable_wasm";

console.log(wasm.tokenize("ab"));
console.log(wasm.tokenize("abc"));
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/index.html
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <title>Hello wasm-pack!</title>
  </head>
  <body>
    <noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript>
    <script src="./bootstrap.js"></script>
  </body>
</html>
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js
const CopyWebpackPlugin = require("copy-webpack-plugin"); const path = require('path'); module.exports = { entry: "./bootstrap.js", output: { path: path.resolve(__dirname, "dist"), filename: "bootstrap.js", }, mode: "development", plugins: [ new CopyWebpackPlugin(['index.html']) ], };
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/package-lock.json
{ "name": "create-wasm-app", "version": "0.1.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "create-wasm-app", "version": "0.1.0", "license": "(MIT OR Apache-2.0)", "dependencies": { "unstable_wasm": "file:../pkg" }, "bin": { "create-wasm-app": ".bin/create-wasm-app.js" }, "devDependencies": { "copy-webpack-plugin": "^11.0.0", "webpack": "^5.75.0", "webpack-cli": "^5.0.1", "webpack-dev-server": "^4.10.0" } }, "../pkg": { "name": "unstable_wasm", "version": "0.0.1" }, "node_modules/@discoveryjs/json-ext": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", "dev": true, "engines": { "node": ">=10.0.0" } }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", "dev": true, "dependencies": { "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", "@jridgewell/trace-mapping": "^0.3.9" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", "dev": true, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", "dev": true, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/source-map": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.2.tgz", "integrity": "sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==", "dev": true, "dependencies": { "@jridgewell/gen-mapping": "^0.3.0", "@jridgewell/trace-mapping": "^0.3.9" } }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.4.14", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", "dev": true }, "node_modules/@jridgewell/trace-mapping": { "version": "0.3.17", "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz", "integrity": "sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==", "dev": true, "dependencies": { "@jridgewell/resolve-uri": "3.1.0", "@jridgewell/sourcemap-codec": "1.4.14" } }, "node_modules/@leichtgewicht/ip-codec": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==", "dev": true }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" }, "engines": { 
"node": ">= 8" } }, "node_modules/@nodelib/fs.stat": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true, "engines": { "node": ">= 8" } }, "node_modules/@nodelib/fs.walk": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dev": true, "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" }, "engines": { "node": ">= 8" } }, "node_modules/@types/body-parser": { "version": "1.19.2", "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", "dev": true, "dependencies": { "@types/connect": "*", "@types/node": "*" } }, "node_modules/@types/bonjour": { "version": "3.5.10", "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/connect": { "version": "3.4.35", "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/connect-history-api-fallback": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz", "integrity": "sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==", "dev": true, "dependencies": { "@types/express-serve-static-core": "*", "@types/node": "*" } }, "node_modules/@types/eslint": { "version": "8.4.10", "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.10.tgz", "integrity": "sha512-Sl/HOqN8NKPmhWo2VBEPm0nvHnu2LL3v9vKo8MEq0EtbJ4eVzGPl41VNPvn5E1i5poMk4/XD8UriLHpJvEP/Nw==", "dev": true, "dependencies": { "@types/estree": "*", "@types/json-schema": "*" } }, "node_modules/@types/eslint-scope": { "version": "3.7.4", "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", "dev": true, "dependencies": { "@types/eslint": "*", "@types/estree": "*" } }, "node_modules/@types/estree": { "version": "0.0.51", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.51.tgz", "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==", "dev": true }, "node_modules/@types/express": { "version": "4.17.13", "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz", "integrity": "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==", "dev": true, "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^4.17.18", "@types/qs": "*", "@types/serve-static": "*" } }, "node_modules/@types/express-serve-static-core": { "version": "4.17.30", "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.30.tgz", "integrity": 
"sha512-gstzbTWro2/nFed1WXtf+TtrpwxH7Ggs4RLYTLbeVgIkUQOI3WG/JKjgeOU1zXDvezllupjrf8OPIdvTbIaVOQ==", "dev": true, "dependencies": { "@types/node": "*", "@types/qs": "*", "@types/range-parser": "*" } }, "node_modules/@types/http-proxy": { "version": "1.17.9", "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.9.tgz", "integrity": "sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/json-schema": { "version": "7.0.11", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", "dev": true }, "node_modules/@types/mime": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/@types/mime/-/mime-3.0.1.tgz", "integrity": "sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==", "dev": true }, "node_modules/@types/node": { "version": "18.7.13", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.7.13.tgz", "integrity": "sha512-46yIhxSe5xEaJZXWdIBP7GU4HDTG8/eo0qd9atdiL+lFpA03y8KS+lkTN834TWJj5767GbWv4n/P6efyTFt1Dw==", "dev": true }, "node_modules/@types/qs": { "version": "6.9.7", "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==", "dev": true }, "node_modules/@types/range-parser": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==", "dev": true }, "node_modules/@types/retry": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", "dev": true }, "node_modules/@types/serve-index": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", "dev": true, "dependencies": { "@types/express": "*" } }, "node_modules/@types/serve-static": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.0.tgz", "integrity": "sha512-z5xyF6uh8CbjAu9760KDKsH2FcDxZ2tFCsA4HIMWE6IkiYMXfVoa+4f9KX+FN0ZLsaMw1WNG2ETLA6N+/YA+cg==", "dev": true, "dependencies": { "@types/mime": "*", "@types/node": "*" } }, "node_modules/@types/sockjs": { "version": "0.3.33", "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/ws": { "version": "8.5.3", "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz", "integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@webassemblyjs/ast": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", "dev": true, "dependencies": { 
"@webassemblyjs/helper-numbers": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1" } }, "node_modules/@webassemblyjs/floating-point-hex-parser": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==", "dev": true }, "node_modules/@webassemblyjs/helper-api-error": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==", "dev": true }, "node_modules/@webassemblyjs/helper-buffer": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==", "dev": true }, "node_modules/@webassemblyjs/helper-numbers": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", "dev": true, "dependencies": { "@webassemblyjs/floating-point-hex-parser": "1.11.1", "@webassemblyjs/helper-api-error": "1.11.1", "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/helper-wasm-bytecode": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==", "dev": true }, "node_modules/@webassemblyjs/helper-wasm-section": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", "dev": true, "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/wasm-gen": "1.11.1" } }, "node_modules/@webassemblyjs/ieee754": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", "dev": true, "dependencies": { "@xtuc/ieee754": "^1.2.0" } }, "node_modules/@webassemblyjs/leb128": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", "dev": true, "dependencies": { "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/utf8": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==", "dev": true }, "node_modules/@webassemblyjs/wasm-edit": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", "dev": true, "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", 
"@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/helper-wasm-section": "1.11.1", "@webassemblyjs/wasm-gen": "1.11.1", "@webassemblyjs/wasm-opt": "1.11.1", "@webassemblyjs/wasm-parser": "1.11.1", "@webassemblyjs/wast-printer": "1.11.1" } }, "node_modules/@webassemblyjs/wasm-gen": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", "dev": true, "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/ieee754": "1.11.1", "@webassemblyjs/leb128": "1.11.1", "@webassemblyjs/utf8": "1.11.1" } }, "node_modules/@webassemblyjs/wasm-opt": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", "dev": true, "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", "@webassemblyjs/wasm-gen": "1.11.1", "@webassemblyjs/wasm-parser": "1.11.1" } }, "node_modules/@webassemblyjs/wasm-parser": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", "dev": true, "dependencies": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-api-error": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/ieee754": "1.11.1", "@webassemblyjs/leb128": "1.11.1", "@webassemblyjs/utf8": "1.11.1" } }, "node_modules/@webassemblyjs/wast-printer": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", "dev": true, "dependencies": { "@webassemblyjs/ast": "1.11.1", "@xtuc/long": "4.2.2" } }, "node_modules/@webpack-cli/configtest": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-2.0.1.tgz", "integrity": "sha512-njsdJXJSiS2iNbQVS0eT8A/KPnmyH4pv1APj2K0d1wrZcBLw+yppxOy4CGqa0OxDJkzfL/XELDhD8rocnIwB5A==", "dev": true, "engines": { "node": ">=14.15.0" }, "peerDependencies": { "webpack": "5.x.x", "webpack-cli": "5.x.x" } }, "node_modules/@webpack-cli/info": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-2.0.1.tgz", "integrity": "sha512-fE1UEWTwsAxRhrJNikE7v4EotYflkEhBL7EbajfkPlf6E37/2QshOy/D48Mw8G5XMFlQtS6YV42vtbG9zBpIQA==", "dev": true, "engines": { "node": ">=14.15.0" }, "peerDependencies": { "webpack": "5.x.x", "webpack-cli": "5.x.x" } }, "node_modules/@webpack-cli/serve": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-2.0.1.tgz", "integrity": "sha512-0G7tNyS+yW8TdgHwZKlDWYXFA6OJQnoLCQvYKkQP0Q2X205PSQ6RNUj0M+1OB/9gRQaUZ/ccYfaxd0nhaWKfjw==", "dev": true, "engines": { "node": ">=14.15.0" }, "peerDependencies": { "webpack": "5.x.x", "webpack-cli": "5.x.x" }, "peerDependenciesMeta": { "webpack-dev-server": { "optional": true } } }, "node_modules/@xtuc/ieee754": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", "dev": true }, 
"node_modules/@xtuc/long": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", "dev": true }, "node_modules/accepts": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", "dev": true, "dependencies": { "mime-types": "~2.1.34", "negotiator": "0.6.3" }, "engines": { "node": ">= 0.6" } }, "node_modules/acorn": { "version": "8.8.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.1.tgz", "integrity": "sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA==", "dev": true, "bin": { "acorn": "bin/acorn" }, "engines": { "node": ">=0.4.0" } }, "node_modules/acorn-import-assertions": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", "dev": true, "peerDependencies": { "acorn": "^8" } }, "node_modules/ajv": { "version": "8.11.2", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.2.tgz", "integrity": "sha512-E4bfmKAhGiSTvMfL1Myyycaub+cUEU2/IvpylXkUu7CHBkBj1f/ikdzbD7YQ6FKUbixDxeYvB/xY4fvyroDlQg==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2", "uri-js": "^4.2.2" }, "funding": { "type": "github", "url": "https://github.com/sponsors/epoberezkin" } }, "node_modules/ajv-formats": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", "dev": true, "dependencies": { "ajv": "^8.0.0" }, "peerDependencies": { "ajv": "^8.0.0" }, "peerDependenciesMeta": { "ajv": { "optional": true } } }, "node_modules/ajv-keywords": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.3" }, "peerDependencies": { "ajv": "^8.8.2" } }, "node_modules/ansi-html-community": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", "dev": true, "engines": [ "node >= 0.8.0" ], "bin": { "ansi-html": "bin/ansi-html" } }, "node_modules/anymatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", "dev": true, "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" }, "engines": { "node": ">= 8" } }, "node_modules/array-flatten": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", "dev": true }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": 
"sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, "node_modules/batch": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", "dev": true }, "node_modules/binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/body-parser": { "version": "1.20.0", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz", "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==", "dev": true, "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.4", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.10.3", "raw-body": "2.5.1", "type-is": "~1.6.18", "unpipe": "1.0.0" }, "engines": { "node": ">= 0.8", "npm": "1.2.8000 || >= 1.4.16" } }, "node_modules/body-parser/node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/bonjour-service": { "version": "1.0.13", "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.0.13.tgz", "integrity": "sha512-LWKRU/7EqDUC9CTAQtuZl5HzBALoCYwtLhffW3et7vZMwv3bWLpJf8bRYlMD5OCcDpTfnPgNCV4yo9ZIaJGMiA==", "dev": true, "dependencies": { "array-flatten": "^2.1.2", "dns-equal": "^1.0.0", "fast-deep-equal": "^3.1.3", "multicast-dns": "^7.2.5" } }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "node_modules/braces": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "dev": true, "dependencies": { "fill-range": "^7.0.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { "version": "4.21.4", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.4.tgz", "integrity": "sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/browserslist" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" } ], "dependencies": { "caniuse-lite": "^1.0.30001400", "electron-to-chromium": "^1.4.251", "node-releases": "^2.0.6", "update-browserslist-db": "^1.0.9" }, "bin": { "browserslist": "cli.js" }, "engines": { "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, "node_modules/buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true }, 
"node_modules/bytes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/call-bind": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", "dev": true, "dependencies": { "function-bind": "^1.1.1", "get-intrinsic": "^1.0.2" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/caniuse-lite": { "version": "1.0.30001441", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001441.tgz", "integrity": "sha512-OyxRR4Vof59I3yGWXws6i908EtGbMzVUi3ganaZQHmydk1iwDhRnvaPG2WaR0KcqrDFKrxVZHULT396LEPhXfg==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/browserslist" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/caniuse-lite" } ] }, "node_modules/chokidar": { "version": "3.5.3", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", "dev": true, "funding": [ { "type": "individual", "url": "https://paulmillr.com/funding/" } ], "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "engines": { "node": ">= 8.10.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "node_modules/chokidar/node_modules/glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, "dependencies": { "is-glob": "^4.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/chrome-trace-event": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", "dev": true, "engines": { "node": ">=6.0" } }, "node_modules/clone-deep": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", "dev": true, "dependencies": { "is-plain-object": "^2.0.4", "kind-of": "^6.0.2", "shallow-clone": "^3.0.0" }, "engines": { "node": ">=6" } }, "node_modules/colorette": { "version": "2.0.19", "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.19.tgz", "integrity": "sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ==", "dev": true }, "node_modules/commander": { "version": "2.20.3", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", "dev": true }, "node_modules/compressible": { "version": "2.0.18", "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", "dev": true, "dependencies": { "mime-db": ">= 1.43.0 < 2" }, 
"engines": { "node": ">= 0.6" } }, "node_modules/compression": { "version": "1.7.4", "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", "dev": true, "dependencies": { "accepts": "~1.3.5", "bytes": "3.0.0", "compressible": "~2.0.16", "debug": "2.6.9", "on-headers": "~1.0.2", "safe-buffer": "5.1.2", "vary": "~1.1.2" }, "engines": { "node": ">= 0.8.0" } }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true }, "node_modules/connect-history-api-fallback": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", "dev": true, "engines": { "node": ">=0.8" } }, "node_modules/content-disposition": { "version": "0.5.4", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", "dev": true, "dependencies": { "safe-buffer": "5.2.1" }, "engines": { "node": ">= 0.6" } }, "node_modules/content-disposition/node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "dev": true, "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/content-type": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/cookie": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/cookie-signature": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", "dev": true }, "node_modules/copy-webpack-plugin": { "version": "11.0.0", "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", "dev": true, "dependencies": { "fast-glob": "^3.2.11", "glob-parent": "^6.0.1", "globby": "^13.1.1", "normalize-path": "^3.0.0", "schema-utils": "^4.0.0", "serialize-javascript": "^6.0.0" }, "engines": { "node": ">= 14.15.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" }, "peerDependencies": { "webpack": "^5.1.0" } }, "node_modules/core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": 
"sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", "dev": true }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dev": true, "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" }, "engines": { "node": ">= 8" } }, "node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "dev": true, "dependencies": { "ms": "2.0.0" } }, "node_modules/default-gateway": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", "dev": true, "dependencies": { "execa": "^5.0.0" }, "engines": { "node": ">= 10" } }, "node_modules/define-lazy-prop": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/depd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/destroy": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", "dev": true, "engines": { "node": ">= 0.8", "npm": "1.2.8000 || >= 1.4.16" } }, "node_modules/detect-node": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", "dev": true }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", "dev": true, "dependencies": { "path-type": "^4.0.0" }, "engines": { "node": ">=8" } }, "node_modules/dns-equal": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==", "dev": true }, "node_modules/dns-packet": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.4.0.tgz", "integrity": "sha512-EgqGeaBB8hLiHLZtp/IbaDQTL8pZ0+IvwzSHA6d7VyMDM+B9hgddEMa9xjK5oYnw0ci0JQ6g2XCD7/f6cafU6g==", "dev": true, "dependencies": { "@leichtgewicht/ip-codec": "^2.0.1" }, "engines": { "node": ">=6" } }, "node_modules/ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", "dev": true }, "node_modules/electron-to-chromium": { "version": "1.4.284", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.284.tgz", "integrity": 
"sha512-M8WEXFuKXMYMVr45fo8mq0wUrrJHheiKZf6BArTKk9ZBYCKJEOU5H8cdWgDT+qCVZf7Na4lVUaZsA+h6uA9+PA==", "dev": true }, "node_modules/encodeurl": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/enhanced-resolve": { "version": "5.12.0", "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.12.0.tgz", "integrity": "sha512-QHTXI/sZQmko1cbDoNAa3mJ5qhWUUNAq3vR0/YiD379fWQrcfuoX1+HW2S0MTt7XmoPLapdaDKUtelUSPic7hQ==", "dev": true, "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" }, "engines": { "node": ">=10.13.0" } }, "node_modules/envinfo": { "version": "7.8.1", "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", "dev": true, "bin": { "envinfo": "dist/cli.js" }, "engines": { "node": ">=4" } }, "node_modules/es-module-lexer": { "version": "0.9.3", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==", "dev": true }, "node_modules/escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", "dev": true }, "node_modules/eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", "dev": true, "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^4.1.1" }, "engines": { "node": ">=8.0.0" } }, "node_modules/esrecurse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "dependencies": { "estraverse": "^5.2.0" }, "engines": { "node": ">=4.0" } }, "node_modules/esrecurse/node_modules/estraverse": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true, "engines": { "node": ">=4.0" } }, "node_modules/estraverse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", "dev": true, "engines": { "node": ">=4.0" } }, "node_modules/etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/eventemitter3": { "version": "4.0.7", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", 
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", "dev": true }, "node_modules/events": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "dev": true, "engines": { "node": ">=0.8.x" } }, "node_modules/execa": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sindresorhus/execa?sponsor=1" } }, "node_modules/express": { "version": "4.18.1", "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz", "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==", "dev": true, "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", "body-parser": "1.20.0", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.5.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", "merge-descriptors": "1.0.1", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", "proxy-addr": "~2.0.7", "qs": "6.10.3", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", "send": "0.18.0", "serve-static": "1.15.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" }, "engines": { "node": ">= 0.10.0" } }, "node_modules/express/node_modules/array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", "dev": true }, "node_modules/express/node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "dev": true, "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "dev": true }, "node_modules/fast-glob": { "version": "3.2.12", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", "dev": true, "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.4" }, "engines": { "node": ">=8.6.0" } }, "node_modules/fast-glob/node_modules/glob-parent": { "version": "5.1.2", 
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, "dependencies": { "is-glob": "^4.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true }, "node_modules/fastest-levenshtein": { "version": "1.0.16", "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", "dev": true, "engines": { "node": ">= 4.9.1" } }, "node_modules/fastq": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", "dev": true, "dependencies": { "reusify": "^1.0.4" } }, "node_modules/faye-websocket": { "version": "0.11.4", "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", "dev": true, "dependencies": { "websocket-driver": ">=0.5.1" }, "engines": { "node": ">=0.8.0" } }, "node_modules/fill-range": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, "engines": { "node": ">=8" } }, "node_modules/finalhandler": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", "dev": true, "dependencies": { "debug": "2.6.9", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", "statuses": "2.0.1", "unpipe": "~1.0.0" }, "engines": { "node": ">= 0.8" } }, "node_modules/find-up": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" }, "engines": { "node": ">=8" } }, "node_modules/follow-redirects": { "version": "1.15.1", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==", "dev": true, "funding": [ { "type": "individual", "url": "https://github.com/sponsors/RubenVerborgh" } ], "engines": { "node": ">=4.0" }, "peerDependenciesMeta": { "debug": { "optional": true } } }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/fresh": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", "integrity": 
"sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/fs-monkey": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz", "integrity": "sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==", "dev": true }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, "node_modules/fsevents": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "dev": true, "hasInstallScript": true, "optional": true, "os": [ "darwin" ], "engines": { "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, "node_modules/function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, "node_modules/get-intrinsic": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", "dev": true, "dependencies": { "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/get-stream": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" }, "engines": { "node": "*" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, "dependencies": { "is-glob": "^4.0.3" }, "engines": { "node": ">=10.13.0" } }, "node_modules/glob-to-regexp": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", "dev": true }, "node_modules/globby": { "version": "13.1.3", "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.3.tgz", "integrity": "sha512-8krCNHXvlCgHDpegPzleMq07yMYTO2sXKASmZmquEYWEmCx6J5UTRbp5RwMJkTJGtcQ44YpiUYUiN0b9mzy8Bw==", "dev": true, "dependencies": { "dir-glob": "^3.0.1", "fast-glob": "^3.2.11", "ignore": "^5.2.0", "merge2": "^1.4.1", "slash": "^4.0.0" }, "engines": { "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, "funding": { "url": 
"https://github.com/sponsors/sindresorhus" } }, "node_modules/graceful-fs": { "version": "4.2.10", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", "dev": true }, "node_modules/handle-thing": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", "dev": true }, "node_modules/has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", "dev": true, "dependencies": { "function-bind": "^1.1.1" }, "engines": { "node": ">= 0.4.0" } }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/has-symbols": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", "dev": true, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/hpack.js": { "version": "2.1.6", "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", "dev": true, "dependencies": { "inherits": "^2.0.1", "obuf": "^1.0.0", "readable-stream": "^2.0.1", "wbuf": "^1.1.0" } }, "node_modules/html-entities": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.3.tgz", "integrity": "sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==", "dev": true }, "node_modules/http-deceiver": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==", "dev": true }, "node_modules/http-errors": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", "dev": true, "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", "setprototypeof": "1.2.0", "statuses": "2.0.1", "toidentifier": "1.0.1" }, "engines": { "node": ">= 0.8" } }, "node_modules/http-parser-js": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.6.tgz", "integrity": "sha512-vDlkRPDJn93swjcjqMSaGSPABbIarsr1TLAui/gLDXzV5VsJNdXNzMYDyNBLQkjWQCJ1uizu8T2oDMhmGt0PRA==", "dev": true }, "node_modules/http-proxy": { "version": "1.18.1", "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", "dev": true, "dependencies": { "eventemitter3": "^4.0.0", "follow-redirects": "^1.0.0", "requires-port": "^1.0.0" }, "engines": { "node": ">=8.0.0" } }, "node_modules/http-proxy-middleware": { "version": "2.0.6", "resolved": 
"https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", "dev": true, "dependencies": { "@types/http-proxy": "^1.17.8", "http-proxy": "^1.18.1", "is-glob": "^4.0.1", "is-plain-obj": "^3.0.0", "micromatch": "^4.0.2" }, "engines": { "node": ">=12.0.0" }, "peerDependencies": { "@types/express": "^4.17.13" }, "peerDependenciesMeta": { "@types/express": { "optional": true } } }, "node_modules/human-signals": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", "dev": true, "engines": { "node": ">=10.17.0" } }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dev": true, "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, "engines": { "node": ">=0.10.0" } }, "node_modules/ignore": { "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", "dev": true, "engines": { "node": ">= 4" } }, "node_modules/import-local": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", "dev": true, "dependencies": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" }, "bin": { "import-local-fixture": "fixtures/cli.js" }, "engines": { "node": ">=8" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dev": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, "node_modules/interpret": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/interpret/-/interpret-3.1.1.tgz", "integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==", "dev": true, "engines": { "node": ">=10.13.0" } }, "node_modules/ipaddr.js": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", "dev": true, "engines": { "node": ">= 0.10" } }, "node_modules/is-binary-path": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dev": true, "dependencies": { "binary-extensions": "^2.0.0" }, "engines": { "node": ">=8" } }, "node_modules/is-core-module": { "version": "2.11.0", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", "integrity": 
"sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", "dev": true, "dependencies": { "has": "^1.0.3" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-docker": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", "dev": true, "bin": { "is-docker": "cli.js" }, "engines": { "node": ">=8" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, "dependencies": { "is-extglob": "^2.1.1" }, "engines": { "node": ">=0.10.0" } }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, "engines": { "node": ">=0.12.0" } }, "node_modules/is-plain-obj": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", "dev": true, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/is-plain-object": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", "dev": true, "dependencies": { "isobject": "^3.0.1" }, "engines": { "node": ">=0.10.0" } }, "node_modules/is-stream": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "dev": true, "engines": { "node": ">=8" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", "dev": true }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, "node_modules/isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", "dev": true, 
"dependencies": { "@types/node": "*", "merge-stream": "^2.0.0", "supports-color": "^8.0.0" }, "engines": { "node": ">= 10.13.0" } }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", "dev": true }, "node_modules/json-schema-traverse": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "dev": true }, "node_modules/kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/loader-runner": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", "dev": true, "engines": { "node": ">=6.11.5" } }, "node_modules/locate-path": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, "dependencies": { "p-locate": "^4.1.0" }, "engines": { "node": ">=8" } }, "node_modules/media-typer": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/memfs": { "version": "3.4.7", "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.4.7.tgz", "integrity": "sha512-ygaiUSNalBX85388uskeCyhSAoOSgzBbtVCr9jA2RROssFL9Q19/ZXFqS+2Th2sr1ewNIWgFdLzLC3Yl1Zv+lw==", "dev": true, "dependencies": { "fs-monkey": "^1.0.3" }, "engines": { "node": ">= 4.0.0" } }, "node_modules/merge-descriptors": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==", "dev": true }, "node_modules/merge-stream": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", "dev": true }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true, "engines": { "node": ">= 8" } }, "node_modules/methods": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/micromatch": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", "dev": 
true, "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" }, "engines": { "node": ">=8.6" } }, "node_modules/mime": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", "dev": true, "bin": { "mime": "cli.js" }, "engines": { "node": ">=4" } }, "node_modules/mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/mime-types": { "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dev": true, "dependencies": { "mime-db": "1.52.0" }, "engines": { "node": ">= 0.6" } }, "node_modules/mimic-fn": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", "dev": true }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "dependencies": { "brace-expansion": "^1.1.7" }, "engines": { "node": "*" } }, "node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "dev": true }, "node_modules/multicast-dns": { "version": "7.2.5", "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", "dev": true, "dependencies": { "dns-packet": "^5.2.2", "thunky": "^1.0.2" }, "bin": { "multicast-dns": "cli.js" } }, "node_modules/negotiator": { "version": "0.6.3", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/neo-async": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", "dev": true }, "node_modules/node-forge": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", "dev": true, "engines": { "node": ">= 6.13.0" } }, "node_modules/node-releases": { "version": "2.0.8", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.8.tgz", "integrity": "sha512-dFSmB8fFHEH/s81Xi+Y/15DQY6VHW81nXRj86EMSL3lmuTmK1e+aT4wrFCkTbm+gSwkw4KpX+rT/pMM2c1mF+A==", "dev": true 
}, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/npm-run-path": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", "dev": true, "dependencies": { "path-key": "^3.0.0" }, "engines": { "node": ">=8" } }, "node_modules/object-inspect": { "version": "1.12.2", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", "dev": true, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/obuf": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", "dev": true }, "node_modules/on-finished": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", "dev": true, "dependencies": { "ee-first": "1.1.1" }, "engines": { "node": ">= 0.8" } }, "node_modules/on-headers": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, "dependencies": { "wrappy": "1" } }, "node_modules/onetime": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, "dependencies": { "mimic-fn": "^2.1.0" }, "engines": { "node": ">=6" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/open": { "version": "8.4.0", "resolved": "https://registry.npmjs.org/open/-/open-8.4.0.tgz", "integrity": "sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==", "dev": true, "dependencies": { "define-lazy-prop": "^2.0.0", "is-docker": "^2.1.1", "is-wsl": "^2.2.0" }, "engines": { "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/open/node_modules/is-wsl": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", "dev": true, "dependencies": { "is-docker": "^2.0.0" }, "engines": { "node": ">=8" } }, "node_modules/p-limit": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "dependencies": { "p-try": "^2.0.0" }, "engines": { "node": ">=6" }, "funding": { "url": 
"https://github.com/sponsors/sindresorhus" } }, "node_modules/p-locate": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, "dependencies": { "p-limit": "^2.2.0" }, "engines": { "node": ">=8" } }, "node_modules/p-retry": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", "dev": true, "dependencies": { "@types/retry": "0.12.0", "retry": "^0.13.1" }, "engines": { "node": ">=8" } }, "node_modules/p-try": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/path-key": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true }, "node_modules/path-to-regexp": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==", "dev": true }, "node_modules/path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/picocolors": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", "dev": true }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, "engines": { "node": ">=8.6" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/pkg-dir": { "version": 
"4.2.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, "dependencies": { "find-up": "^4.0.0" }, "engines": { "node": ">=8" } }, "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", "dev": true }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", "dev": true, "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" }, "engines": { "node": ">= 0.10" } }, "node_modules/punycode": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/qs": { "version": "6.10.3", "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz", "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==", "dev": true, "dependencies": { "side-channel": "^1.0.4" }, "engines": { "node": ">=0.6" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "dev": true, "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/randombytes": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", "dev": true, "dependencies": { "safe-buffer": "^5.1.0" } }, "node_modules/range-parser": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/raw-body": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", "dev": true, "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "unpipe": "1.0.0" }, "engines": { "node": ">= 0.8" } }, "node_modules/raw-body/node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/readable-stream": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": 
true, "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "dev": true, "dependencies": { "picomatch": "^2.2.1" }, "engines": { "node": ">=8.10.0" } }, "node_modules/rechoir": { "version": "0.8.0", "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.8.0.tgz", "integrity": "sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==", "dev": true, "dependencies": { "resolve": "^1.20.0" }, "engines": { "node": ">= 10.13.0" } }, "node_modules/require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", "dev": true }, "node_modules/resolve": { "version": "1.22.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", "dev": true, "dependencies": { "is-core-module": "^2.9.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/resolve-cwd": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, "dependencies": { "resolve-from": "^5.0.0" }, "engines": { "node": ">=8" } }, "node_modules/resolve-from": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/retry": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", "dev": true, "engines": { "node": ">= 4" } }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", "dev": true, "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" } }, "node_modules/run-parallel": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", "dev": true, "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], 
"dependencies": { "queue-microtask": "^1.2.2" } }, "node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "dev": true }, "node_modules/schema-utils": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", "dev": true, "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", "ajv-formats": "^2.1.1", "ajv-keywords": "^5.0.0" }, "engines": { "node": ">= 12.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" } }, "node_modules/select-hose": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=", "dev": true }, "node_modules/selfsigned": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.0.1.tgz", "integrity": "sha512-LmME957M1zOsUhG+67rAjKfiWFox3SBxE/yymatMZsAx+oMrJ0YQ8AToOnyCm7xbeg2ep37IHLxdu0o2MavQOQ==", "dev": true, "dependencies": { "node-forge": "^1" }, "engines": { "node": ">=10" } }, "node_modules/send": { "version": "0.18.0", "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", "dev": true, "dependencies": { "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "fresh": "0.5.2", "http-errors": "2.0.0", "mime": "1.6.0", "ms": "2.1.3", "on-finished": "2.4.1", "range-parser": "~1.2.1", "statuses": "2.0.1" }, "engines": { "node": ">= 0.8.0" } }, "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/serialize-javascript": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", "dev": true, "dependencies": { "randombytes": "^2.1.0" } }, "node_modules/serve-index": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", "dev": true, "dependencies": { "accepts": "~1.3.4", "batch": "0.6.1", "debug": "2.6.9", "escape-html": "~1.0.3", "http-errors": "~1.6.2", "mime-types": "~2.1.17", "parseurl": "~1.3.2" }, "engines": { "node": ">= 0.8.0" } }, "node_modules/serve-index/node_modules/depd": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/serve-index/node_modules/http-errors": { "version": "1.6.3", "resolved": 
"https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", "dev": true, "dependencies": { "depd": "~1.1.2", "inherits": "2.0.3", "setprototypeof": "1.1.0", "statuses": ">= 1.4.0 < 2" }, "engines": { "node": ">= 0.6" } }, "node_modules/serve-index/node_modules/inherits": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", "dev": true }, "node_modules/serve-index/node_modules/setprototypeof": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", "dev": true }, "node_modules/serve-index/node_modules/statuses": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", "dev": true, "engines": { "node": ">= 0.6" } }, "node_modules/serve-static": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", "dev": true, "dependencies": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "parseurl": "~1.3.3", "send": "0.18.0" }, "engines": { "node": ">= 0.8.0" } }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", "dev": true }, "node_modules/shallow-clone": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", "dev": true, "dependencies": { "kind-of": "^6.0.2" }, "engines": { "node": ">=8" } }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "dependencies": { "shebang-regex": "^3.0.0" }, "engines": { "node": ">=8" } }, "node_modules/shebang-regex": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/side-channel": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", "dev": true, "dependencies": { "call-bind": "^1.0.0", "get-intrinsic": "^1.0.2", "object-inspect": "^1.9.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/signal-exit": { "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, "node_modules/slash": { "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", "dev": true, "engines": { "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/sockjs": { "version": "0.3.24", "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", "dev": true, "dependencies": { "faye-websocket": "^0.11.3", "uuid": "^8.3.2", "websocket-driver": "^0.7.4" } }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", "dev": true, "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" } }, "node_modules/spdy": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", "dev": true, "dependencies": { "debug": "^4.1.0", "handle-thing": "^2.0.0", "http-deceiver": "^1.2.7", "select-hose": "^2.0.0", "spdy-transport": "^3.0.0" }, "engines": { "node": ">=6.0.0" } }, "node_modules/spdy-transport": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", "dev": true, "dependencies": { "debug": "^4.1.0", "detect-node": "^2.0.4", "hpack.js": "^2.1.6", "obuf": "^1.1.2", "readable-stream": "^3.0.6", "wbuf": "^1.7.3" } }, "node_modules/spdy-transport/node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, "dependencies": { "ms": "2.1.2" }, "engines": { "node": ">=6.0" }, "peerDependenciesMeta": { "supports-color": { "optional": true } } }, "node_modules/spdy-transport/node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, "node_modules/spdy-transport/node_modules/readable-stream": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", "dev": true, "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/spdy/node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, "dependencies": { "ms": "2.1.2" }, "engines": { "node": ">=6.0" }, "peerDependenciesMeta": { "supports-color": { "optional": true } } }, 
"node_modules/spdy/node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, "node_modules/statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, "dependencies": { "safe-buffer": "~5.1.0" } }, "node_modules/strip-final-newline": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/supports-color": { "version": "8.1.1", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, "dependencies": { "has-flag": "^4.0.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/chalk/supports-color?sponsor=1" } }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/tapable": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/terser": { "version": "5.16.1", "resolved": "https://registry.npmjs.org/terser/-/terser-5.16.1.tgz", "integrity": "sha512-xvQfyfA1ayT0qdK47zskQgRZeWLoOQ8JQ6mIgRGVNwZKdQMU+5FkCBjmv4QjcrTzyZquRw2FVtlJSRUmMKQslw==", "dev": true, "dependencies": { "@jridgewell/source-map": "^0.3.2", "acorn": "^8.5.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, "bin": { "terser": "bin/terser" }, "engines": { "node": ">=10" } }, "node_modules/terser-webpack-plugin": { "version": "5.3.6", "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.6.tgz", "integrity": "sha512-kfLFk+PoLUQIbLmB1+PZDMRSZS99Mp+/MHqDNmMA6tOItzRt+Npe3E+fsMs5mfcM0wCtrrdU387UnV+vnSffXQ==", "dev": true, "dependencies": { "@jridgewell/trace-mapping": "^0.3.14", "jest-worker": "^27.4.5", "schema-utils": "^3.1.1", "serialize-javascript": "^6.0.0", "terser": "^5.14.1" }, "engines": { "node": ">= 10.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" }, "peerDependencies": { "webpack": "^5.1.0" }, "peerDependenciesMeta": { "@swc/core": { "optional": true }, "esbuild": { "optional": true }, "uglify-js": { "optional": true } } }, "node_modules/terser-webpack-plugin/node_modules/ajv": { "version": "6.12.6", "resolved": 
"https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" }, "funding": { "type": "github", "url": "https://github.com/sponsors/epoberezkin" } }, "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", "dev": true, "peerDependencies": { "ajv": "^6.9.1" } }, "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, "node_modules/terser-webpack-plugin/node_modules/schema-utils": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", "dev": true, "dependencies": { "@types/json-schema": "^7.0.8", "ajv": "^6.12.5", "ajv-keywords": "^3.5.2" }, "engines": { "node": ">= 10.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" } }, "node_modules/thunky": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", "dev": true }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, "dependencies": { "is-number": "^7.0.0" }, "engines": { "node": ">=8.0" } }, "node_modules/toidentifier": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", "dev": true, "engines": { "node": ">=0.6" } }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "dev": true, "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" }, "engines": { "node": ">= 0.6" } }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/unstable_wasm": { "resolved": "../pkg", "link": true }, "node_modules/update-browserslist-db": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz", "integrity": "sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/browserslist" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" } ], "dependencies": { 
"escalade": "^3.1.1", "picocolors": "^1.0.0" }, "bin": { "browserslist-lint": "cli.js" }, "peerDependencies": { "browserslist": ">= 4.21.0" } }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, "dependencies": { "punycode": "^2.1.0" } }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", "dev": true }, "node_modules/utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", "dev": true, "engines": { "node": ">= 0.4.0" } }, "node_modules/uuid": { "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", "dev": true, "bin": { "uuid": "dist/bin/uuid" } }, "node_modules/vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", "dev": true, "engines": { "node": ">= 0.8" } }, "node_modules/watchpack": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", "dev": true, "dependencies": { "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.1.2" }, "engines": { "node": ">=10.13.0" } }, "node_modules/wbuf": { "version": "1.7.3", "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", "dev": true, "dependencies": { "minimalistic-assert": "^1.0.0" } }, "node_modules/webpack": { "version": "5.76.0", "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.76.0.tgz", "integrity": "sha512-l5sOdYBDunyf72HW8dF23rFtWq/7Zgvt/9ftMof71E/yUb1YLOBmTgA2K4vQthB3kotMrSj609txVE0dnr2fjA==", "dev": true, "dependencies": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^0.0.51", "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/wasm-edit": "1.11.1", "@webassemblyjs/wasm-parser": "1.11.1", "acorn": "^8.7.1", "acorn-import-assertions": "^1.7.6", "browserslist": "^4.14.5", "chrome-trace-event": "^1.0.2", "enhanced-resolve": "^5.10.0", "es-module-lexer": "^0.9.0", "eslint-scope": "5.1.1", "events": "^3.2.0", "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.2.9", "json-parse-even-better-errors": "^2.3.1", "loader-runner": "^4.2.0", "mime-types": "^2.1.27", "neo-async": "^2.6.2", "schema-utils": "^3.1.0", "tapable": "^2.1.1", "terser-webpack-plugin": "^5.1.3", "watchpack": "^2.4.0", "webpack-sources": "^3.2.3" }, "bin": { "webpack": "bin/webpack.js" }, "engines": { "node": ">=10.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" }, "peerDependenciesMeta": { "webpack-cli": { "optional": true } } }, "node_modules/webpack-cli": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-5.0.1.tgz", "integrity": "sha512-S3KVAyfwUqr0Mo/ur3NzIp6jnerNpo7GUO6so51mxLi1spqsA17YcMXy0WOIJtBSnj748lthxC6XLbNKh/ZC+A==", "dev": true, "dependencies": { "@discoveryjs/json-ext": "^0.5.0", "@webpack-cli/configtest": "^2.0.1", "@webpack-cli/info": "^2.0.1", 
"@webpack-cli/serve": "^2.0.1", "colorette": "^2.0.14", "commander": "^9.4.1", "cross-spawn": "^7.0.3", "envinfo": "^7.7.3", "fastest-levenshtein": "^1.0.12", "import-local": "^3.0.2", "interpret": "^3.1.1", "rechoir": "^0.8.0", "webpack-merge": "^5.7.3" }, "bin": { "webpack-cli": "bin/cli.js" }, "engines": { "node": ">=14.15.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" }, "peerDependencies": { "webpack": "5.x.x" }, "peerDependenciesMeta": { "@webpack-cli/generators": { "optional": true }, "webpack-bundle-analyzer": { "optional": true }, "webpack-dev-server": { "optional": true } } }, "node_modules/webpack-cli/node_modules/commander": { "version": "9.4.1", "resolved": "https://registry.npmjs.org/commander/-/commander-9.4.1.tgz", "integrity": "sha512-5EEkTNyHNGFPD2H+c/dXXfQZYa/scCKasxWcXJaWnNJ99pnQN9Vnmqow+p+PlFPE63Q6mThaZws1T+HxfpgtPw==", "dev": true, "engines": { "node": "^12.20.0 || >=14" } }, "node_modules/webpack-dev-middleware": { "version": "5.3.3", "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", "dev": true, "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", "mime-types": "^2.1.31", "range-parser": "^1.2.1", "schema-utils": "^4.0.0" }, "engines": { "node": ">= 12.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" }, "peerDependencies": { "webpack": "^4.0.0 || ^5.0.0" } }, "node_modules/webpack-dev-server": { "version": "4.10.0", "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.10.0.tgz", "integrity": "sha512-7dezwAs+k6yXVFZ+MaL8VnE+APobiO3zvpp3rBHe/HmWQ+avwh0Q3d0xxacOiBybZZ3syTZw9HXzpa3YNbAZDQ==", "dev": true, "dependencies": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", "@types/express": "^4.17.13", "@types/serve-index": "^1.9.1", "@types/serve-static": "^1.13.10", "@types/sockjs": "^0.3.33", "@types/ws": "^8.5.1", "ansi-html-community": "^0.0.8", "bonjour-service": "^1.0.11", "chokidar": "^3.5.3", "colorette": "^2.0.10", "compression": "^1.7.4", "connect-history-api-fallback": "^2.0.0", "default-gateway": "^6.0.3", "express": "^4.17.3", "graceful-fs": "^4.2.6", "html-entities": "^2.3.2", "http-proxy-middleware": "^2.0.3", "ipaddr.js": "^2.0.1", "open": "^8.0.9", "p-retry": "^4.5.0", "rimraf": "^3.0.2", "schema-utils": "^4.0.0", "selfsigned": "^2.0.1", "serve-index": "^1.9.1", "sockjs": "^0.3.24", "spdy": "^4.0.2", "webpack-dev-middleware": "^5.3.1", "ws": "^8.4.2" }, "bin": { "webpack-dev-server": "bin/webpack-dev-server.js" }, "engines": { "node": ">= 12.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" }, "peerDependencies": { "webpack": "^4.37.0 || ^5.0.0" }, "peerDependenciesMeta": { "webpack-cli": { "optional": true } } }, "node_modules/webpack-dev-server/node_modules/ipaddr.js": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.0.1.tgz", "integrity": "sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==", "dev": true, "engines": { "node": ">= 10" } }, "node_modules/webpack-dev-server/node_modules/rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "dev": true, "dependencies": { 
"glob": "^7.1.3" }, "bin": { "rimraf": "bin.js" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/webpack-merge": { "version": "5.8.0", "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", "dev": true, "dependencies": { "clone-deep": "^4.0.1", "wildcard": "^2.0.0" }, "engines": { "node": ">=10.0.0" } }, "node_modules/webpack-sources": { "version": "3.2.3", "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", "dev": true, "engines": { "node": ">=10.13.0" } }, "node_modules/webpack/node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" }, "funding": { "type": "github", "url": "https://github.com/sponsors/epoberezkin" } }, "node_modules/webpack/node_modules/ajv-keywords": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", "dev": true, "peerDependencies": { "ajv": "^6.9.1" } }, "node_modules/webpack/node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, "node_modules/webpack/node_modules/schema-utils": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", "dev": true, "dependencies": { "@types/json-schema": "^7.0.8", "ajv": "^6.12.5", "ajv-keywords": "^3.5.2" }, "engines": { "node": ">= 10.13.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/webpack" } }, "node_modules/websocket-driver": { "version": "0.7.4", "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", "dev": true, "dependencies": { "http-parser-js": ">=0.5.1", "safe-buffer": ">=5.1.0", "websocket-extensions": ">=0.1.1" }, "engines": { "node": ">=0.8.0" } }, "node_modules/websocket-extensions": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", "dev": true, "engines": { "node": ">=0.8.0" } }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "bin/node-which" }, "engines": { "node": ">= 8" } }, "node_modules/wildcard": { "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==", "dev": true }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", "dev": true }, "node_modules/ws": { "version": "8.8.1", "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", "dev": true, "engines": { "node": ">=10.0.0" }, "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": "^5.0.2" }, "peerDependenciesMeta": { "bufferutil": { "optional": true }, "utf-8-validate": { "optional": true } } } }, "dependencies": { "@discoveryjs/json-ext": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", "dev": true }, "@jridgewell/gen-mapping": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", "dev": true, "requires": { "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", "@jridgewell/trace-mapping": "^0.3.9" } }, "@jridgewell/resolve-uri": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", "dev": true }, "@jridgewell/set-array": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", "dev": true }, "@jridgewell/source-map": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.2.tgz", "integrity": "sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==", "dev": true, "requires": { "@jridgewell/gen-mapping": "^0.3.0", "@jridgewell/trace-mapping": "^0.3.9" } }, "@jridgewell/sourcemap-codec": { "version": "1.4.14", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", "dev": true }, "@jridgewell/trace-mapping": { "version": "0.3.17", "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz", "integrity": "sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==", "dev": true, "requires": { "@jridgewell/resolve-uri": "3.1.0", "@jridgewell/sourcemap-codec": "1.4.14" } }, "@leichtgewicht/ip-codec": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==", "dev": true }, "@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, "requires": { 
"@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "@nodelib/fs.stat": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true }, "@nodelib/fs.walk": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dev": true, "requires": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "@types/body-parser": { "version": "1.19.2", "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", "dev": true, "requires": { "@types/connect": "*", "@types/node": "*" } }, "@types/bonjour": { "version": "3.5.10", "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", "dev": true, "requires": { "@types/node": "*" } }, "@types/connect": { "version": "3.4.35", "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", "dev": true, "requires": { "@types/node": "*" } }, "@types/connect-history-api-fallback": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz", "integrity": "sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==", "dev": true, "requires": { "@types/express-serve-static-core": "*", "@types/node": "*" } }, "@types/eslint": { "version": "8.4.10", "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.4.10.tgz", "integrity": "sha512-Sl/HOqN8NKPmhWo2VBEPm0nvHnu2LL3v9vKo8MEq0EtbJ4eVzGPl41VNPvn5E1i5poMk4/XD8UriLHpJvEP/Nw==", "dev": true, "requires": { "@types/estree": "*", "@types/json-schema": "*" } }, "@types/eslint-scope": { "version": "3.7.4", "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", "dev": true, "requires": { "@types/eslint": "*", "@types/estree": "*" } }, "@types/estree": { "version": "0.0.51", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.51.tgz", "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==", "dev": true }, "@types/express": { "version": "4.17.13", "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz", "integrity": "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==", "dev": true, "requires": { "@types/body-parser": "*", "@types/express-serve-static-core": "^4.17.18", "@types/qs": "*", "@types/serve-static": "*" } }, "@types/express-serve-static-core": { "version": "4.17.30", "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.30.tgz", "integrity": "sha512-gstzbTWro2/nFed1WXtf+TtrpwxH7Ggs4RLYTLbeVgIkUQOI3WG/JKjgeOU1zXDvezllupjrf8OPIdvTbIaVOQ==", "dev": true, "requires": { "@types/node": "*", "@types/qs": "*", "@types/range-parser": "*" } }, "@types/http-proxy": { 
"version": "1.17.9", "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.9.tgz", "integrity": "sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw==", "dev": true, "requires": { "@types/node": "*" } }, "@types/json-schema": { "version": "7.0.11", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", "dev": true }, "@types/mime": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/@types/mime/-/mime-3.0.1.tgz", "integrity": "sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==", "dev": true }, "@types/node": { "version": "18.7.13", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.7.13.tgz", "integrity": "sha512-46yIhxSe5xEaJZXWdIBP7GU4HDTG8/eo0qd9atdiL+lFpA03y8KS+lkTN834TWJj5767GbWv4n/P6efyTFt1Dw==", "dev": true }, "@types/qs": { "version": "6.9.7", "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==", "dev": true }, "@types/range-parser": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==", "dev": true }, "@types/retry": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", "dev": true }, "@types/serve-index": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", "dev": true, "requires": { "@types/express": "*" } }, "@types/serve-static": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.0.tgz", "integrity": "sha512-z5xyF6uh8CbjAu9760KDKsH2FcDxZ2tFCsA4HIMWE6IkiYMXfVoa+4f9KX+FN0ZLsaMw1WNG2ETLA6N+/YA+cg==", "dev": true, "requires": { "@types/mime": "*", "@types/node": "*" } }, "@types/sockjs": { "version": "0.3.33", "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", "dev": true, "requires": { "@types/node": "*" } }, "@types/ws": { "version": "8.5.3", "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz", "integrity": "sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w==", "dev": true, "requires": { "@types/node": "*" } }, "@webassemblyjs/ast": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", "integrity": "sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw==", "dev": true, "requires": { "@webassemblyjs/helper-numbers": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1" } }, "@webassemblyjs/floating-point-hex-parser": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz", "integrity": "sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ==", "dev": true 
}, "@webassemblyjs/helper-api-error": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz", "integrity": "sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg==", "dev": true }, "@webassemblyjs/helper-buffer": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz", "integrity": "sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA==", "dev": true }, "@webassemblyjs/helper-numbers": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz", "integrity": "sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ==", "dev": true, "requires": { "@webassemblyjs/floating-point-hex-parser": "1.11.1", "@webassemblyjs/helper-api-error": "1.11.1", "@xtuc/long": "4.2.2" } }, "@webassemblyjs/helper-wasm-bytecode": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz", "integrity": "sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q==", "dev": true }, "@webassemblyjs/helper-wasm-section": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz", "integrity": "sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg==", "dev": true, "requires": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/wasm-gen": "1.11.1" } }, "@webassemblyjs/ieee754": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz", "integrity": "sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ==", "dev": true, "requires": { "@xtuc/ieee754": "^1.2.0" } }, "@webassemblyjs/leb128": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.1.tgz", "integrity": "sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw==", "dev": true, "requires": { "@xtuc/long": "4.2.2" } }, "@webassemblyjs/utf8": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.1.tgz", "integrity": "sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ==", "dev": true }, "@webassemblyjs/wasm-edit": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz", "integrity": "sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA==", "dev": true, "requires": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/helper-wasm-section": "1.11.1", "@webassemblyjs/wasm-gen": "1.11.1", "@webassemblyjs/wasm-opt": "1.11.1", "@webassemblyjs/wasm-parser": "1.11.1", "@webassemblyjs/wast-printer": "1.11.1" } }, "@webassemblyjs/wasm-gen": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz", "integrity": "sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA==", "dev": true, "requires": { "@webassemblyjs/ast": "1.11.1", 
"@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/ieee754": "1.11.1", "@webassemblyjs/leb128": "1.11.1", "@webassemblyjs/utf8": "1.11.1" } }, "@webassemblyjs/wasm-opt": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz", "integrity": "sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw==", "dev": true, "requires": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-buffer": "1.11.1", "@webassemblyjs/wasm-gen": "1.11.1", "@webassemblyjs/wasm-parser": "1.11.1" } }, "@webassemblyjs/wasm-parser": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz", "integrity": "sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA==", "dev": true, "requires": { "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/helper-api-error": "1.11.1", "@webassemblyjs/helper-wasm-bytecode": "1.11.1", "@webassemblyjs/ieee754": "1.11.1", "@webassemblyjs/leb128": "1.11.1", "@webassemblyjs/utf8": "1.11.1" } }, "@webassemblyjs/wast-printer": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz", "integrity": "sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg==", "dev": true, "requires": { "@webassemblyjs/ast": "1.11.1", "@xtuc/long": "4.2.2" } }, "@webpack-cli/configtest": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-2.0.1.tgz", "integrity": "sha512-njsdJXJSiS2iNbQVS0eT8A/KPnmyH4pv1APj2K0d1wrZcBLw+yppxOy4CGqa0OxDJkzfL/XELDhD8rocnIwB5A==", "dev": true, "requires": {} }, "@webpack-cli/info": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-2.0.1.tgz", "integrity": "sha512-fE1UEWTwsAxRhrJNikE7v4EotYflkEhBL7EbajfkPlf6E37/2QshOy/D48Mw8G5XMFlQtS6YV42vtbG9zBpIQA==", "dev": true, "requires": {} }, "@webpack-cli/serve": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-2.0.1.tgz", "integrity": "sha512-0G7tNyS+yW8TdgHwZKlDWYXFA6OJQnoLCQvYKkQP0Q2X205PSQ6RNUj0M+1OB/9gRQaUZ/ccYfaxd0nhaWKfjw==", "dev": true, "requires": {} }, "@xtuc/ieee754": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", "dev": true }, "@xtuc/long": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", "dev": true }, "accepts": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", "dev": true, "requires": { "mime-types": "~2.1.34", "negotiator": "0.6.3" } }, "acorn": { "version": "8.8.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.1.tgz", "integrity": "sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA==", "dev": true }, "acorn-import-assertions": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz", "integrity": "sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw==", "dev": true, "requires": {} }, 
"ajv": { "version": "8.11.2", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.2.tgz", "integrity": "sha512-E4bfmKAhGiSTvMfL1Myyycaub+cUEU2/IvpylXkUu7CHBkBj1f/ikdzbD7YQ6FKUbixDxeYvB/xY4fvyroDlQg==", "dev": true, "requires": { "fast-deep-equal": "^3.1.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2", "uri-js": "^4.2.2" } }, "ajv-formats": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", "dev": true, "requires": { "ajv": "^8.0.0" } }, "ajv-keywords": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", "dev": true, "requires": { "fast-deep-equal": "^3.1.3" } }, "ansi-html-community": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", "dev": true }, "anymatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", "dev": true, "requires": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, "array-flatten": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", "dev": true }, "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, "batch": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", "dev": true }, "binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", "dev": true }, "body-parser": { "version": "1.20.0", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz", "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==", "dev": true, "requires": { "bytes": "3.1.2", "content-type": "~1.0.4", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.10.3", "raw-body": "2.5.1", "type-is": "~1.6.18", "unpipe": "1.0.0" }, "dependencies": { "bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", "dev": true } } }, "bonjour-service": { "version": "1.0.13", "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.0.13.tgz", "integrity": "sha512-LWKRU/7EqDUC9CTAQtuZl5HzBALoCYwtLhffW3et7vZMwv3bWLpJf8bRYlMD5OCcDpTfnPgNCV4yo9ZIaJGMiA==", "dev": true, "requires": { "array-flatten": "^2.1.2", "dns-equal": "^1.0.0", 
"fast-deep-equal": "^3.1.3", "multicast-dns": "^7.2.5" } }, "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dev": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "braces": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "dev": true, "requires": { "fill-range": "^7.0.1" } }, "browserslist": { "version": "4.21.4", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.4.tgz", "integrity": "sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==", "dev": true, "requires": { "caniuse-lite": "^1.0.30001400", "electron-to-chromium": "^1.4.251", "node-releases": "^2.0.6", "update-browserslist-db": "^1.0.9" } }, "buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true }, "bytes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", "dev": true }, "call-bind": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", "dev": true, "requires": { "function-bind": "^1.1.1", "get-intrinsic": "^1.0.2" } }, "caniuse-lite": { "version": "1.0.30001441", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001441.tgz", "integrity": "sha512-OyxRR4Vof59I3yGWXws6i908EtGbMzVUi3ganaZQHmydk1iwDhRnvaPG2WaR0KcqrDFKrxVZHULT396LEPhXfg==", "dev": true }, "chokidar": { "version": "3.5.3", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", "dev": true, "requires": { "anymatch": "~3.1.2", "braces": "~3.0.2", "fsevents": "~2.3.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "dependencies": { "glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, "requires": { "is-glob": "^4.0.1" } } } }, "chrome-trace-event": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", "dev": true }, "clone-deep": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", "dev": true, "requires": { "is-plain-object": "^2.0.4", "kind-of": "^6.0.2", "shallow-clone": "^3.0.0" } }, "colorette": { "version": "2.0.19", "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.19.tgz", "integrity": 
"sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ==", "dev": true }, "commander": { "version": "2.20.3", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", "dev": true }, "compressible": { "version": "2.0.18", "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", "dev": true, "requires": { "mime-db": ">= 1.43.0 < 2" } }, "compression": { "version": "1.7.4", "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", "dev": true, "requires": { "accepts": "~1.3.5", "bytes": "3.0.0", "compressible": "~2.0.16", "debug": "2.6.9", "on-headers": "~1.0.2", "safe-buffer": "5.1.2", "vary": "~1.1.2" } }, "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true }, "connect-history-api-fallback": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", "dev": true }, "content-disposition": { "version": "0.5.4", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", "dev": true, "requires": { "safe-buffer": "5.2.1" }, "dependencies": { "safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "dev": true } } }, "content-type": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", "dev": true }, "cookie": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", "dev": true }, "cookie-signature": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", "dev": true }, "copy-webpack-plugin": { "version": "11.0.0", "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", "dev": true, "requires": { "fast-glob": "^3.2.11", "glob-parent": "^6.0.1", "globby": "^13.1.1", "normalize-path": "^3.0.0", "schema-utils": "^4.0.0", "serialize-javascript": "^6.0.0" } }, "core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", 
"dev": true }, "cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dev": true, "requires": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "dev": true, "requires": { "ms": "2.0.0" } }, "default-gateway": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", "dev": true, "requires": { "execa": "^5.0.0" } }, "define-lazy-prop": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", "dev": true }, "depd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", "dev": true }, "destroy": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", "dev": true }, "detect-node": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", "dev": true }, "dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", "dev": true, "requires": { "path-type": "^4.0.0" } }, "dns-equal": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==", "dev": true }, "dns-packet": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.4.0.tgz", "integrity": "sha512-EgqGeaBB8hLiHLZtp/IbaDQTL8pZ0+IvwzSHA6d7VyMDM+B9hgddEMa9xjK5oYnw0ci0JQ6g2XCD7/f6cafU6g==", "dev": true, "requires": { "@leichtgewicht/ip-codec": "^2.0.1" } }, "ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", "dev": true }, "electron-to-chromium": { "version": "1.4.284", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.284.tgz", "integrity": "sha512-M8WEXFuKXMYMVr45fo8mq0wUrrJHheiKZf6BArTKk9ZBYCKJEOU5H8cdWgDT+qCVZf7Na4lVUaZsA+h6uA9+PA==", "dev": true }, "encodeurl": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", "dev": true }, "enhanced-resolve": { "version": "5.12.0", "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.12.0.tgz", "integrity": 
"sha512-QHTXI/sZQmko1cbDoNAa3mJ5qhWUUNAq3vR0/YiD379fWQrcfuoX1+HW2S0MTt7XmoPLapdaDKUtelUSPic7hQ==", "dev": true, "requires": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "envinfo": { "version": "7.8.1", "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", "dev": true }, "es-module-lexer": { "version": "0.9.3", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", "integrity": "sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==", "dev": true }, "escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", "dev": true }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", "dev": true }, "eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", "dev": true, "requires": { "esrecurse": "^4.3.0", "estraverse": "^4.1.1" } }, "esrecurse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "requires": { "estraverse": "^5.2.0" }, "dependencies": { "estraverse": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true } } }, "estraverse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", "dev": true }, "etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", "dev": true }, "eventemitter3": { "version": "4.0.7", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", "dev": true }, "events": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "dev": true }, "execa": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, "requires": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" } }, "express": { "version": "4.18.1", "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz", "integrity": 
"sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==", "dev": true, "requires": { "accepts": "~1.3.8", "array-flatten": "1.1.1", "body-parser": "1.20.0", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.5.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", "merge-descriptors": "1.0.1", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", "proxy-addr": "~2.0.7", "qs": "6.10.3", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", "send": "0.18.0", "serve-static": "1.15.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" }, "dependencies": { "array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", "dev": true }, "safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "dev": true } } }, "fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "dev": true }, "fast-glob": { "version": "3.2.12", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", "dev": true, "requires": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.4" }, "dependencies": { "glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, "requires": { "is-glob": "^4.0.1" } } } }, "fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true }, "fastest-levenshtein": { "version": "1.0.16", "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", "dev": true }, "fastq": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", "dev": true, "requires": { "reusify": "^1.0.4" } }, "faye-websocket": { "version": "0.11.4", "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", "dev": true, "requires": { "websocket-driver": ">=0.5.1" } }, "fill-range": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "integrity": 
"sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", "dev": true, "requires": { "to-regex-range": "^5.0.1" } }, "finalhandler": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", "dev": true, "requires": { "debug": "2.6.9", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", "statuses": "2.0.1", "unpipe": "~1.0.0" } }, "find-up": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, "requires": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" } }, "follow-redirects": { "version": "1.15.1", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==", "dev": true }, "forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", "dev": true }, "fresh": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", "dev": true }, "fs-monkey": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.3.tgz", "integrity": "sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==", "dev": true }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, "fsevents": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "dev": true, "optional": true }, "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, "get-intrinsic": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", "dev": true, "requires": { "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1" } }, "get-stream": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true }, "glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" } }, "glob-parent": { "version": "6.0.2", 
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, "requires": { "is-glob": "^4.0.3" } }, "glob-to-regexp": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", "dev": true }, "globby": { "version": "13.1.3", "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.3.tgz", "integrity": "sha512-8krCNHXvlCgHDpegPzleMq07yMYTO2sXKASmZmquEYWEmCx6J5UTRbp5RwMJkTJGtcQ44YpiUYUiN0b9mzy8Bw==", "dev": true, "requires": { "dir-glob": "^3.0.1", "fast-glob": "^3.2.11", "ignore": "^5.2.0", "merge2": "^1.4.1", "slash": "^4.0.0" } }, "graceful-fs": { "version": "4.2.10", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", "dev": true }, "handle-thing": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", "dev": true }, "has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", "dev": true, "requires": { "function-bind": "^1.1.1" } }, "has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true }, "has-symbols": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", "dev": true }, "hpack.js": { "version": "2.1.6", "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", "dev": true, "requires": { "inherits": "^2.0.1", "obuf": "^1.0.0", "readable-stream": "^2.0.1", "wbuf": "^1.1.0" } }, "html-entities": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.3.tgz", "integrity": "sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA==", "dev": true }, "http-deceiver": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==", "dev": true }, "http-errors": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", "dev": true, "requires": { "depd": "2.0.0", "inherits": "2.0.4", "setprototypeof": "1.2.0", "statuses": "2.0.1", "toidentifier": "1.0.1" } }, "http-parser-js": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.6.tgz", "integrity": "sha512-vDlkRPDJn93swjcjqMSaGSPABbIarsr1TLAui/gLDXzV5VsJNdXNzMYDyNBLQkjWQCJ1uizu8T2oDMhmGt0PRA==", "dev": true }, "http-proxy": { "version": 
"1.18.1", "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", "dev": true, "requires": { "eventemitter3": "^4.0.0", "follow-redirects": "^1.0.0", "requires-port": "^1.0.0" } }, "http-proxy-middleware": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", "dev": true, "requires": { "@types/http-proxy": "^1.17.8", "http-proxy": "^1.18.1", "is-glob": "^4.0.1", "is-plain-obj": "^3.0.0", "micromatch": "^4.0.2" } }, "human-signals": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", "dev": true }, "iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dev": true, "requires": { "safer-buffer": ">= 2.1.2 < 3" } }, "ignore": { "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", "dev": true }, "import-local": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", "dev": true, "requires": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" } }, "inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dev": true, "requires": { "once": "^1.3.0", "wrappy": "1" } }, "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, "interpret": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/interpret/-/interpret-3.1.1.tgz", "integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==", "dev": true }, "ipaddr.js": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", "dev": true }, "is-binary-path": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dev": true, "requires": { "binary-extensions": "^2.0.0" } }, "is-core-module": { "version": "2.11.0", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", "integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", "dev": true, "requires": { "has": "^1.0.3" } }, "is-docker": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", "integrity": 
"sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", "dev": true }, "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true }, "is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, "requires": { "is-extglob": "^2.1.1" } }, "is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true }, "is-plain-obj": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", "dev": true }, "is-plain-object": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", "dev": true, "requires": { "isobject": "^3.0.1" } }, "is-stream": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "dev": true }, "isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", "dev": true }, "isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", "dev": true }, "jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", "dev": true, "requires": { "@types/node": "*", "merge-stream": "^2.0.0", "supports-color": "^8.0.0" } }, "json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", "dev": true }, "json-schema-traverse": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "dev": true }, "kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true }, "loader-runner": { "version": "4.3.0", "resolved": 
"https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", "dev": true }, "locate-path": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, "requires": { "p-locate": "^4.1.0" } }, "media-typer": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", "dev": true }, "memfs": { "version": "3.4.7", "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.4.7.tgz", "integrity": "sha512-ygaiUSNalBX85388uskeCyhSAoOSgzBbtVCr9jA2RROssFL9Q19/ZXFqS+2Th2sr1ewNIWgFdLzLC3Yl1Zv+lw==", "dev": true, "requires": { "fs-monkey": "^1.0.3" } }, "merge-descriptors": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==", "dev": true }, "merge-stream": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", "dev": true }, "merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true }, "methods": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", "dev": true }, "micromatch": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", "dev": true, "requires": { "braces": "^3.0.2", "picomatch": "^2.3.1" } }, "mime": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", "dev": true }, "mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "dev": true }, "mime-types": { "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dev": true, "requires": { "mime-db": "1.52.0" } }, "mimic-fn": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true }, "minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", "dev": true }, "minimatch": { "version": "3.1.2", "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "requires": { "brace-expansion": "^1.1.7" } }, "ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "dev": true }, "multicast-dns": { "version": "7.2.5", "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", "dev": true, "requires": { "dns-packet": "^5.2.2", "thunky": "^1.0.2" } }, "negotiator": { "version": "0.6.3", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", "dev": true }, "neo-async": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", "dev": true }, "node-forge": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", "dev": true }, "node-releases": { "version": "2.0.8", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.8.tgz", "integrity": "sha512-dFSmB8fFHEH/s81Xi+Y/15DQY6VHW81nXRj86EMSL3lmuTmK1e+aT4wrFCkTbm+gSwkw4KpX+rT/pMM2c1mF+A==", "dev": true }, "normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true }, "npm-run-path": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", "dev": true, "requires": { "path-key": "^3.0.0" } }, "object-inspect": { "version": "1.12.2", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", "dev": true }, "obuf": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", "dev": true }, "on-finished": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", "dev": true, "requires": { "ee-first": "1.1.1" } }, "on-headers": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", "dev": true }, "once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, "requires": { "wrappy": "1" } }, "onetime": { "version": "5.1.2", "resolved": 
"https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, "requires": { "mimic-fn": "^2.1.0" } }, "open": { "version": "8.4.0", "resolved": "https://registry.npmjs.org/open/-/open-8.4.0.tgz", "integrity": "sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==", "dev": true, "requires": { "define-lazy-prop": "^2.0.0", "is-docker": "^2.1.1", "is-wsl": "^2.2.0" }, "dependencies": { "is-wsl": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", "dev": true, "requires": { "is-docker": "^2.0.0" } } } }, "p-limit": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "requires": { "p-try": "^2.0.0" } }, "p-locate": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, "requires": { "p-limit": "^2.2.0" } }, "p-retry": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", "dev": true, "requires": { "@types/retry": "0.12.0", "retry": "^0.13.1" } }, "p-try": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true }, "parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", "dev": true }, "path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true }, "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "dev": true }, "path-key": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true }, "path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true }, "path-to-regexp": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==", "dev": true }, "path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", "dev": 
true }, "picocolors": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", "dev": true }, "picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true }, "pkg-dir": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, "requires": { "find-up": "^4.0.0" } }, "process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", "dev": true }, "proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", "dev": true, "requires": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "punycode": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", "dev": true }, "qs": { "version": "6.10.3", "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz", "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==", "dev": true, "requires": { "side-channel": "^1.0.4" } }, "queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "dev": true }, "randombytes": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", "dev": true, "requires": { "safe-buffer": "^5.1.0" } }, "range-parser": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", "dev": true }, "raw-body": { "version": "2.5.1", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", "dev": true, "requires": { "bytes": "3.1.2", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "unpipe": "1.0.0" }, "dependencies": { "bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", "dev": true } } }, "readable-stream": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": 
"~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "dev": true, "requires": { "picomatch": "^2.2.1" } }, "rechoir": { "version": "0.8.0", "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.8.0.tgz", "integrity": "sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==", "dev": true, "requires": { "resolve": "^1.20.0" } }, "require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", "dev": true }, "requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", "dev": true }, "resolve": { "version": "1.22.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", "dev": true, "requires": { "is-core-module": "^2.9.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" } }, "resolve-cwd": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, "requires": { "resolve-from": "^5.0.0" } }, "resolve-from": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true }, "retry": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", "dev": true }, "reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", "dev": true }, "run-parallel": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", "dev": true, "requires": { "queue-microtask": "^1.2.2" } }, "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, "safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "dev": true }, "schema-utils": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", "dev": true, "requires": { "@types/json-schema": "^7.0.9", "ajv": "^8.8.0", "ajv-formats": 
"^2.1.1", "ajv-keywords": "^5.0.0" } }, "select-hose": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=", "dev": true }, "selfsigned": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.0.1.tgz", "integrity": "sha512-LmME957M1zOsUhG+67rAjKfiWFox3SBxE/yymatMZsAx+oMrJ0YQ8AToOnyCm7xbeg2ep37IHLxdu0o2MavQOQ==", "dev": true, "requires": { "node-forge": "^1" } }, "send": { "version": "0.18.0", "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", "dev": true, "requires": { "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "fresh": "0.5.2", "http-errors": "2.0.0", "mime": "1.6.0", "ms": "2.1.3", "on-finished": "2.4.1", "range-parser": "~1.2.1", "statuses": "2.0.1" }, "dependencies": { "ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true } } }, "serialize-javascript": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", "dev": true, "requires": { "randombytes": "^2.1.0" } }, "serve-index": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", "dev": true, "requires": { "accepts": "~1.3.4", "batch": "0.6.1", "debug": "2.6.9", "escape-html": "~1.0.3", "http-errors": "~1.6.2", "mime-types": "~2.1.17", "parseurl": "~1.3.2" }, "dependencies": { "depd": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", "dev": true }, "http-errors": { "version": "1.6.3", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", "dev": true, "requires": { "depd": "~1.1.2", "inherits": "2.0.3", "setprototypeof": "1.1.0", "statuses": ">= 1.4.0 < 2" } }, "inherits": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", "dev": true }, "setprototypeof": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", "dev": true }, "statuses": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", "dev": true } } }, "serve-static": { "version": "1.15.0", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", "dev": true, "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "parseurl": "~1.3.3", "send": "0.18.0" } }, "setprototypeof": { "version": "1.2.0", "resolved": 
"https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", "dev": true }, "shallow-clone": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", "dev": true, "requires": { "kind-of": "^6.0.2" } }, "shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "requires": { "shebang-regex": "^3.0.0" } }, "shebang-regex": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true }, "side-channel": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", "dev": true, "requires": { "call-bind": "^1.0.0", "get-intrinsic": "^1.0.2", "object-inspect": "^1.9.0" } }, "signal-exit": { "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, "slash": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", "dev": true }, "sockjs": { "version": "0.3.24", "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", "dev": true, "requires": { "faye-websocket": "^0.11.3", "uuid": "^8.3.2", "websocket-driver": "^0.7.4" } }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true }, "source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", "dev": true, "requires": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" } }, "spdy": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", "dev": true, "requires": { "debug": "^4.1.0", "handle-thing": "^2.0.0", "http-deceiver": "^1.2.7", "select-hose": "^2.0.0", "spdy-transport": "^3.0.0" }, "dependencies": { "debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, "requires": { "ms": "2.1.2" } }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true } } }, "spdy-transport": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", "dev": true, "requires": { "debug": "^4.1.0", "detect-node": "^2.0.4", "hpack.js": "^2.1.6", "obuf": "^1.1.2", "readable-stream": "^3.0.6", "wbuf": "^1.7.3" }, "dependencies": { "debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, "requires": { "ms": "2.1.2" } }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, "readable-stream": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", "dev": true, "requires": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } } } }, "statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", "dev": true }, "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, "requires": { "safe-buffer": "~5.1.0" } }, "strip-final-newline": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", "dev": true }, "supports-color": { "version": "8.1.1", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, "requires": { "has-flag": "^4.0.0" } }, "supports-preserve-symlinks-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true }, "tapable": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", "dev": true }, "terser": { "version": "5.16.1", "resolved": "https://registry.npmjs.org/terser/-/terser-5.16.1.tgz", "integrity": "sha512-xvQfyfA1ayT0qdK47zskQgRZeWLoOQ8JQ6mIgRGVNwZKdQMU+5FkCBjmv4QjcrTzyZquRw2FVtlJSRUmMKQslw==", "dev": true, "requires": { "@jridgewell/source-map": "^0.3.2", "acorn": "^8.5.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" } }, "terser-webpack-plugin": { "version": "5.3.6", "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.6.tgz", "integrity": 
"sha512-kfLFk+PoLUQIbLmB1+PZDMRSZS99Mp+/MHqDNmMA6tOItzRt+Npe3E+fsMs5mfcM0wCtrrdU387UnV+vnSffXQ==", "dev": true, "requires": { "@jridgewell/trace-mapping": "^0.3.14", "jest-worker": "^27.4.5", "schema-utils": "^3.1.1", "serialize-javascript": "^6.0.0", "terser": "^5.14.1" }, "dependencies": { "ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "ajv-keywords": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", "dev": true, "requires": {} }, "json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, "schema-utils": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", "dev": true, "requires": { "@types/json-schema": "^7.0.8", "ajv": "^6.12.5", "ajv-keywords": "^3.5.2" } } } }, "thunky": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", "dev": true }, "to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, "requires": { "is-number": "^7.0.0" } }, "toidentifier": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", "dev": true }, "type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "dev": true, "requires": { "media-typer": "0.3.0", "mime-types": "~2.1.24" } }, "unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", "dev": true }, "unstable_wasm": { "version": "file:../pkg" }, "update-browserslist-db": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz", "integrity": "sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==", "dev": true, "requires": { "escalade": "^3.1.1", "picocolors": "^1.0.0" } }, "uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, "requires": { "punycode": "^2.1.0" } }, "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": 
"sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", "dev": true }, "utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", "dev": true }, "uuid": { "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", "dev": true }, "vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", "dev": true }, "watchpack": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", "dev": true, "requires": { "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.1.2" } }, "wbuf": { "version": "1.7.3", "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", "dev": true, "requires": { "minimalistic-assert": "^1.0.0" } }, "webpack": { "version": "5.76.0", "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.76.0.tgz", "integrity": "sha512-l5sOdYBDunyf72HW8dF23rFtWq/7Zgvt/9ftMof71E/yUb1YLOBmTgA2K4vQthB3kotMrSj609txVE0dnr2fjA==", "dev": true, "requires": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^0.0.51", "@webassemblyjs/ast": "1.11.1", "@webassemblyjs/wasm-edit": "1.11.1", "@webassemblyjs/wasm-parser": "1.11.1", "acorn": "^8.7.1", "acorn-import-assertions": "^1.7.6", "browserslist": "^4.14.5", "chrome-trace-event": "^1.0.2", "enhanced-resolve": "^5.10.0", "es-module-lexer": "^0.9.0", "eslint-scope": "5.1.1", "events": "^3.2.0", "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.2.9", "json-parse-even-better-errors": "^2.3.1", "loader-runner": "^4.2.0", "mime-types": "^2.1.27", "neo-async": "^2.6.2", "schema-utils": "^3.1.0", "tapable": "^2.1.1", "terser-webpack-plugin": "^5.1.3", "watchpack": "^2.4.0", "webpack-sources": "^3.2.3" }, "dependencies": { "ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "ajv-keywords": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", "dev": true, "requires": {} }, "json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, "schema-utils": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.1.1.tgz", "integrity": "sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw==", "dev": true, "requires": { "@types/json-schema": "^7.0.8", "ajv": "^6.12.5", "ajv-keywords": "^3.5.2" } } } }, "webpack-cli": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-5.0.1.tgz", "integrity": 
"sha512-S3KVAyfwUqr0Mo/ur3NzIp6jnerNpo7GUO6so51mxLi1spqsA17YcMXy0WOIJtBSnj748lthxC6XLbNKh/ZC+A==", "dev": true, "requires": { "@discoveryjs/json-ext": "^0.5.0", "@webpack-cli/configtest": "^2.0.1", "@webpack-cli/info": "^2.0.1", "@webpack-cli/serve": "^2.0.1", "colorette": "^2.0.14", "commander": "^9.4.1", "cross-spawn": "^7.0.3", "envinfo": "^7.7.3", "fastest-levenshtein": "^1.0.12", "import-local": "^3.0.2", "interpret": "^3.1.1", "rechoir": "^0.8.0", "webpack-merge": "^5.7.3" }, "dependencies": { "commander": { "version": "9.4.1", "resolved": "https://registry.npmjs.org/commander/-/commander-9.4.1.tgz", "integrity": "sha512-5EEkTNyHNGFPD2H+c/dXXfQZYa/scCKasxWcXJaWnNJ99pnQN9Vnmqow+p+PlFPE63Q6mThaZws1T+HxfpgtPw==", "dev": true } } }, "webpack-dev-middleware": { "version": "5.3.3", "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", "dev": true, "requires": { "colorette": "^2.0.10", "memfs": "^3.4.3", "mime-types": "^2.1.31", "range-parser": "^1.2.1", "schema-utils": "^4.0.0" } }, "webpack-dev-server": { "version": "4.10.0", "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.10.0.tgz", "integrity": "sha512-7dezwAs+k6yXVFZ+MaL8VnE+APobiO3zvpp3rBHe/HmWQ+avwh0Q3d0xxacOiBybZZ3syTZw9HXzpa3YNbAZDQ==", "dev": true, "requires": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", "@types/express": "^4.17.13", "@types/serve-index": "^1.9.1", "@types/serve-static": "^1.13.10", "@types/sockjs": "^0.3.33", "@types/ws": "^8.5.1", "ansi-html-community": "^0.0.8", "bonjour-service": "^1.0.11", "chokidar": "^3.5.3", "colorette": "^2.0.10", "compression": "^1.7.4", "connect-history-api-fallback": "^2.0.0", "default-gateway": "^6.0.3", "express": "^4.17.3", "graceful-fs": "^4.2.6", "html-entities": "^2.3.2", "http-proxy-middleware": "^2.0.3", "ipaddr.js": "^2.0.1", "open": "^8.0.9", "p-retry": "^4.5.0", "rimraf": "^3.0.2", "schema-utils": "^4.0.0", "selfsigned": "^2.0.1", "serve-index": "^1.9.1", "sockjs": "^0.3.24", "spdy": "^4.0.2", "webpack-dev-middleware": "^5.3.1", "ws": "^8.4.2" }, "dependencies": { "ipaddr.js": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.0.1.tgz", "integrity": "sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==", "dev": true }, "rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "dev": true, "requires": { "glob": "^7.1.3" } } } }, "webpack-merge": { "version": "5.8.0", "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz", "integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==", "dev": true, "requires": { "clone-deep": "^4.0.1", "wildcard": "^2.0.0" } }, "webpack-sources": { "version": "3.2.3", "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", "dev": true }, "websocket-driver": { "version": "0.7.4", "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", "dev": 
true, "requires": { "http-parser-js": ">=0.5.1", "safe-buffer": ">=5.1.0", "websocket-extensions": ">=0.1.1" } }, "websocket-extensions": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", "dev": true }, "which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "requires": { "isexe": "^2.0.0" } }, "wildcard": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz", "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==", "dev": true }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", "dev": true }, "ws": { "version": "8.8.1", "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", "dev": true, "requires": {} } } }
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/LICENSE-MIT
Copyright (c) [year] [name]

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js
// A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
  .catch(e => console.error("Error importing `index.js`:", e));
0
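For context on why the single dynamic import in `bootstrap.js` above is enough: once `index.js` has been loaded asynchronously, it can use ordinary static imports, since the whole wasm-containing subgraph is then reached lazily. The following is a minimal hypothetical sketch of such an `index.js`, not the actual file from the repository; the package name `unstable_wasm` comes from the lock file above ("file:../pkg"), while the `greet` export is purely an assumed placeholder for whatever the wasm module really exposes.

// index.js -- hypothetical sketch only.
// A static import is fine here because this module is only ever reached via
// the dynamic import("./index.js") in bootstrap.js.
import * as wasm from "unstable_wasm";

// Call an exported function of the wasm package if it exists (the name
// `greet` is assumed for illustration); otherwise just confirm the load.
console.log(typeof wasm.greet === "function" ? wasm.greet("wasm") : "wasm module loaded");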
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml
language: node_js
node_js: "10"

script:
  - ./node_modules/.bin/webpack
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/LICENSE-APACHE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/.bin/create-wasm-app.js
#!/usr/bin/env node

const { spawn } = require("child_process");
const fs = require("fs");

let folderName = '.';

if (process.argv.length >= 3) {
  folderName = process.argv[2];
  if (!fs.existsSync(folderName)) {
    fs.mkdirSync(folderName);
  }
}

const clone = spawn("git", ["clone", "https://github.com/rustwasm/create-wasm-app.git", folderName]);

clone.on("close", code => {
  if (code !== 0) {
    console.error("cloning the template failed!");
    process.exit(code);
  } else {
    console.log("🦀 Rust + 🕸 Wasm = ❤");
  }
});
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs
pub fn set_panic_hook() {
    // When the `console_error_panic_hook` feature is enabled, we can call the
    // `set_panic_hook` function at least once during initialization, and then
    // we will get better error messages if our code ever panics.
    //
    // For more details see
    // https://github.com/rustwasm/console_error_panic_hook#readme
    #[cfg(feature = "console_error_panic_hook")]
    console_error_panic_hook::set_once();
}
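For reference, a minimal sketch (an assumption for illustration, not code from this example project) of how such a hook is commonly invoked once at module start-up in a wasm-bindgen crate:

// Hypothetical wiring: run the hook once when the wasm module is
// instantiated so later panics surface in the browser console.
use wasm_bindgen::prelude::*;

#[wasm_bindgen(start)]
pub fn run() {
    // Assumes this lives in the same crate as `set_panic_hook`, so the call
    // resolves via `crate::utils::set_panic_hook`. It is a no-op unless the
    // `console_error_panic_hook` feature is enabled.
    crate::utils::set_panic_hook();
}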
0
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/src/lib.rs
mod utils;

use tokenizers::models::bpe::{Vocab, BPE};
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;

// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

#[wasm_bindgen]
pub fn tokenize(string: &str) -> Vec<u32> {
    let vocab: Vocab = vec![
        ("a".to_string(), 0),
        ("##b".to_string(), 1),
        ("##c".to_string(), 2),
        ("ab".to_string(), 3),
        ("abc".to_string(), 4),
    ]
    .into_iter()
    .collect();

    let merges = vec![
        ("a".to_string(), "##b".to_string()),
        ("ab".to_string(), "##c".to_string()),
    ];

    let bpe = BPE::builder()
        .vocab_and_merges(vocab, merges)
        .unk_token("[UNK]".to_string())
        .continuing_subword_prefix("##".to_string())
        .build()
        .unwrap();

    let tokenizer = Tokenizer::new(bpe);
    tokenizer
        .encode(string, false)
        .unwrap()
        .get_ids()
        .into_iter()
        .cloned()
        .collect()
}
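A hedged companion sketch for the toy vocabulary above: with the two merges a + ##b -> ab and ab + ##c -> abc, whole-word inputs should collapse to single ids. The test module below is illustrative and not part of the example crate.

#[cfg(test)]
mod toy_bpe_tests {
    use super::tokenize;

    #[test]
    fn merges_collapse_to_single_ids() {
        // "abc" -> a + ##b -> ab, then ab + ##c -> abc, which has id 4.
        assert_eq!(tokenize("abc"), vec![4]);
        // "ab" only triggers the first merge, yielding id 3.
        assert_eq!(tokenize("ab"), vec![3]);
    }
}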
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/unigram.rs
#[cfg(not(debug_assertions))] use assert_approx_eq::assert_approx_eq; use std::collections::HashMap; use std::fs::read_to_string; use std::path::Path; #[cfg(not(debug_assertions))] use tokenizers::models::unigram::Lattice; use tokenizers::models::unigram::Unigram; use tokenizers::models::unigram::UnigramTrainer; use tokenizers::tokenizer::Model; #[test] fn test_unigram_from_file() { let model = Unigram::load(Path::new("data/unigram.json")).unwrap(); let string = "吾輩《わがはい》は猫である。名前はまだ無い。"; assert_eq!( model .tokenize(string) .unwrap() .iter() .map(|tok| tok.value.clone()) .collect::<Vec<_>>(), vec![ "吾輩", "《", "わが", "はい", "》", "は", "猫", "である", "。", "名前", "はまだ", "無い", "。" ] ); } #[test] fn test_train_unigram_from_file() { let content = read_to_string("data/small.txt").unwrap(); let mut word_counts = HashMap::new(); content.split_whitespace().for_each(|word| { // This is important for the test of char vs u8 let word = format!("▁{}", word); *word_counts.entry(word).or_insert(0) += 1; }); // println!("Words counts {:?}", word_counts); let trainer = UnigramTrainer::builder() .show_progress(false) .unk_token(Some("<UNK>".into())) .build() .unwrap(); let mut model = Unigram::default(); let sentences: Vec<_> = word_counts .iter() .map(|(s, i)| (s.to_owned(), *i)) .collect(); trainer.do_train(sentences, &mut model).unwrap(); assert_eq!(model.get_vocab_size(), 719); } #[cfg(not(debug_assertions))] #[test] fn test_sample() { let mut lattice = Lattice::from("ABC", 0, 2); lattice.insert(0, 1, 1.0, 3); // A lattice.insert(1, 1, 1.2, 4); // B lattice.insert(2, 1, 1.5, 5); // C lattice.insert(0, 2, 1.6, 6); // AB lattice.insert(1, 2, 1.7, 7); // BC lattice.insert(0, 3, 1.8, 8); // ABC let thetas: Vec<f64> = vec![0.0, 0.01, 0.5, 0.7, 1.0]; for theta in thetas { let mut probs: HashMap<String, f64> = HashMap::new(); probs.insert("A B C".to_string(), (theta * (1.0 + 1.2 + 1.5)).exp()); probs.insert("AB C".to_string(), (theta * (1.6 + 1.5)).exp()); probs.insert("A BC".to_string(), (theta * (1.0 + 1.7)).exp()); probs.insert("ABC".to_string(), (theta * (1.8)).exp()); // Computes expected probabilities. let mut z = 0.0; for (_, p) in probs.iter() { z += p; } for (_, p) in probs.iter_mut() { *p /= z; } let n_trials = 10_000; let mut freq: HashMap<String, u32> = HashMap::new(); for _ in 0..n_trials { let string = lattice.sample_token(theta).join(" "); *freq.entry(string).or_insert(0) += 1; } assert_eq!(freq.len(), probs.len()); for (s, p) in probs.iter() { assert_approx_eq!(1.0 * (freq[s] as f64) / (n_trials as f64), p, 0.03) } } }
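A small companion sketch of the same API used in `test_unigram_from_file`: `tokenize` returns `Token` values, so the ids can be collected as well as the strings compared above. The `id` field access and the fixture path are assumptions reused from the test.

use std::path::Path;
use tokenizers::models::unigram::Unigram;
use tokenizers::tokenizer::Model;

fn unigram_ids(sentence: &str) -> Vec<u32> {
    // Same fixture as the test above; panics if the data file is missing.
    let model = Unigram::load(Path::new("data/unigram.json")).unwrap();
    model
        .tokenize(sentence)
        .unwrap()
        .iter()
        .map(|token| token.id)
        .collect()
}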
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/serialization.rs
mod common; use common::*; use tokenizers::decoders::byte_level::ByteLevel; use tokenizers::decoders::DecoderWrapper; use tokenizers::models::bpe::BPE; use tokenizers::models::wordlevel::WordLevel; use tokenizers::models::wordpiece::WordPiece; use tokenizers::models::ModelWrapper; use tokenizers::normalizers::bert::BertNormalizer; use tokenizers::normalizers::unicode::{NFC, NFKC}; use tokenizers::normalizers::NormalizerWrapper; use tokenizers::pre_tokenizers::bert::BertPreTokenizer; use tokenizers::pre_tokenizers::delimiter::CharDelimiterSplit; use tokenizers::pre_tokenizers::split::{Split, SplitPattern}; use tokenizers::pre_tokenizers::whitespace::Whitespace; use tokenizers::pre_tokenizers::PreTokenizerWrapper; use tokenizers::processors::bert::BertProcessing; use tokenizers::processors::PostProcessorWrapper; use tokenizers::{SplitDelimiterBehavior, Tokenizer, TokenizerImpl}; #[test] fn bpe_serde() { let bpe = get_byte_level_bpe(); let ser = serde_json::to_string(&bpe).unwrap(); let de = serde_json::from_str(&ser).unwrap(); assert_eq!(bpe, de); } #[test] fn wordpiece_serde() { let wordpiece = get_bert_wordpiece(); let ser = serde_json::to_string(&wordpiece).unwrap(); let de = serde_json::from_str(&ser).unwrap(); assert_eq!(wordpiece, de); } #[test] fn wordlevel_serde() { let wordlevel = WordLevel::from_file("data/gpt2-vocab.json", "<unk>".into()).unwrap(); let ser = serde_json::to_string(&wordlevel).unwrap(); let de = serde_json::from_str(&ser).unwrap(); assert_eq!(wordlevel, de); } #[test] fn normalizers() { // Test unit struct let nfc = NFC; let nfc_ser = serde_json::to_string(&nfc).unwrap(); assert_eq!(nfc_ser, r#"{"type":"NFC"}"#); // empty struct can deserialize from self serde_json::from_str::<NFC>(&nfc_ser).unwrap(); let err: Result<NFKC, _> = serde_json::from_str(&nfc_ser); assert!(err.is_err(), "NFKC shouldn't be deserializable from NFC"); // wrapper can can deserialize from inner let nfc_wrapped: NormalizerWrapper = serde_json::from_str(&nfc_ser).unwrap(); match &nfc_wrapped { NormalizerWrapper::NFC(_) => (), _ => panic!("NFC wrapped with incorrect variant"), } let ser_wrapped = serde_json::to_string(&nfc_wrapped).unwrap(); assert_eq!(ser_wrapped, nfc_ser); // Test non-empty roundtrip let bert = BertNormalizer::default(); let bert_ser = serde_json::to_string(&bert).unwrap(); assert_eq!( bert_ser, r#"{"type":"BertNormalizer","clean_text":true,"handle_chinese_chars":true,"strip_accents":null,"lowercase":true}"# ); // make sure we can deserialize to self serde_json::from_str::<BertNormalizer>(&bert_ser).unwrap(); // wrapper can deserialize from inner serialization let bert_wrapped: NormalizerWrapper = serde_json::from_str(&bert_ser).unwrap(); match &bert_wrapped { NormalizerWrapper::BertNormalizer(_) => (), _ => panic!("BertNormalizer wrapped with incorrect variant"), } // wrapped serializes same way as inner let ser_wrapped = serde_json::to_string(&bert_wrapped).unwrap(); assert_eq!(ser_wrapped, bert_ser); } #[test] fn processors() { let bert = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 0)); let bert_ser = serde_json::to_string(&bert).unwrap(); assert_eq!( bert_ser, r#"{"type":"BertProcessing","sep":["SEP",0],"cls":["CLS",0]}"# ); serde_json::from_str::<BertProcessing>(&bert_ser).unwrap(); let bert_wrapped: PostProcessorWrapper = serde_json::from_str(&bert_ser).unwrap(); match &bert_wrapped { PostProcessorWrapper::Bert(_) => (), _ => panic!("Bert wrapped with incorrect variant"), } let ser_wrapped = serde_json::to_string(&bert_wrapped).unwrap(); assert_eq!(ser_wrapped, 
bert_ser); } #[test] fn pretoks() { // Test unit struct let bert = BertPreTokenizer; let bert_ser = serde_json::to_string(&bert).unwrap(); assert_eq!(bert_ser, r#"{"type":"BertPreTokenizer"}"#); // empty struct can deserialize from self serde_json::from_str::<BertPreTokenizer>(&bert_ser).unwrap(); let err: Result<Whitespace, _> = serde_json::from_str(&bert_ser); assert!( err.is_err(), "Whitespace shouldn't be deserializable from BertPreTokenizer" ); // wrapper can can deserialize from inner let bert_wrapped: PreTokenizerWrapper = serde_json::from_str(&bert_ser).unwrap(); match &bert_wrapped { PreTokenizerWrapper::BertPreTokenizer(_) => (), _ => panic!("Bert wrapped with incorrect variant"), } let ser_wrapped = serde_json::to_string(&bert_wrapped).unwrap(); assert_eq!(ser_wrapped, bert_ser); // Test non-empty roundtrip let ch = CharDelimiterSplit::new(' '); let ch_ser = serde_json::to_string(&ch).unwrap(); assert_eq!(ch_ser, r#"{"type":"CharDelimiterSplit","delimiter":" "}"#); // make sure we can deserialize to self serde_json::from_str::<CharDelimiterSplit>(&ch_ser).unwrap(); // wrapper can deserialize from inner serialization let ch_wrapped: PreTokenizerWrapper = serde_json::from_str(&ch_ser).unwrap(); match &ch_wrapped { PreTokenizerWrapper::Delimiter(_) => (), _ => panic!("CharDelimiterSplit wrapped with incorrect variant"), } // wrapped serializes same way as inner let ser_wrapped = serde_json::to_string(&ch_wrapped).unwrap(); assert_eq!(ser_wrapped, ch_ser); let wsp = Whitespace {}; let wsp_ser = serde_json::to_string(&wsp).unwrap(); assert_eq!(wsp_ser, r#"{"type":"Whitespace"}"#); serde_json::from_str::<Whitespace>(&wsp_ser).unwrap(); let err: Result<BertPreTokenizer, _> = serde_json::from_str(&wsp_ser); assert!( err.is_err(), "BertPreTokenizer shouldn't be deserializable from Whitespace" ); let pattern: SplitPattern = "[SEP]".into(); let pretok = Split::new(pattern, SplitDelimiterBehavior::Isolated, false).unwrap(); let pretok_str = serde_json::to_string(&pretok).unwrap(); assert_eq!( pretok_str, r#"{"type":"Split","pattern":{"String":"[SEP]"},"behavior":"Isolated","invert":false}"# ); assert_eq!(serde_json::from_str::<Split>(&pretok_str).unwrap(), pretok); let pattern = SplitPattern::Regex("[SEP]".to_string()); let pretok = Split::new(pattern, SplitDelimiterBehavior::Isolated, false).unwrap(); let pretok_str = serde_json::to_string(&pretok).unwrap(); assert_eq!( pretok_str, r#"{"type":"Split","pattern":{"Regex":"[SEP]"},"behavior":"Isolated","invert":false}"# ); assert_eq!(serde_json::from_str::<Split>(&pretok_str).unwrap(), pretok); } #[test] fn decoders() { let byte_level = ByteLevel::default(); let byte_level_ser = serde_json::to_string(&byte_level).unwrap(); assert_eq!( byte_level_ser, r#"{"type":"ByteLevel","add_prefix_space":true,"trim_offsets":true,"use_regex":true}"# ); serde_json::from_str::<ByteLevel>(&byte_level_ser).unwrap(); let byte_level_wrapper: DecoderWrapper = serde_json::from_str(&byte_level_ser).unwrap(); match &byte_level_wrapper { DecoderWrapper::ByteLevel(_) => (), _ => panic!("ByteLevel wrapped with incorrect variant"), } let ser_wrapped = serde_json::to_string(&byte_level_wrapper).unwrap(); assert_eq!(ser_wrapped, byte_level_ser); } #[test] fn models() { let bpe = BPE::default(); let bpe_ser = serde_json::to_string(&bpe).unwrap(); serde_json::from_str::<BPE>(&bpe_ser).unwrap(); let bpe_wrapper: ModelWrapper = serde_json::from_str(&bpe_ser).unwrap(); match &bpe_wrapper { ModelWrapper::BPE(_) => (), _ => panic!("BPE wrapped with incorrect variant"), } let 
ser_wrapped = serde_json::to_string(&bpe_wrapper).unwrap(); assert_eq!(ser_wrapped, bpe_ser); } #[test] fn tokenizer() { let wordpiece = WordPiece::default(); let mut tokenizer = Tokenizer::new(wordpiece); tokenizer.with_normalizer(NFC); let ser = serde_json::to_string(&tokenizer).unwrap(); let _: Tokenizer = serde_json::from_str(&ser).unwrap(); let unwrapped_nfc_tok: TokenizerImpl< WordPiece, NFC, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, > = serde_json::from_str(&ser).unwrap(); assert_eq!(serde_json::to_string(&unwrapped_nfc_tok).unwrap(), ser); let err: Result< TokenizerImpl<WordPiece, NFKC, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper>, _, > = serde_json::from_str(&ser); assert!(err.is_err(), "NFKC shouldn't be deserializable from NFC"); let de: TokenizerImpl< WordPiece, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, > = serde_json::from_str(&ser).unwrap(); assert_eq!(serde_json::to_string(&de).unwrap(), ser); } #[test] fn test_deserialize_long_file() { let _tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap(); }
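The same wrapper round-trip pattern can be sketched for a normalizer the tests above do not cover; the exact JSON string for `Lowercase` is an assumption based on the `{"type":"..."}` convention shown in the other cases.

use tokenizers::normalizers::{Lowercase, NormalizerWrapper};

fn lowercase_roundtrip() {
    let ser = serde_json::to_string(&Lowercase).unwrap();
    // Assumed to follow the same tagged unit-struct convention as Whitespace/NFC.
    assert_eq!(ser, r#"{"type":"Lowercase"}"#);

    // The wrapper should accept the inner serialization unchanged.
    let wrapped: NormalizerWrapper = serde_json::from_str(&ser).unwrap();
    assert_eq!(serde_json::to_string(&wrapped).unwrap(), ser);
}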
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/offsets.rs
mod common; use common::*; use tokenizers::tokenizer::AddedToken; macro_rules! check_offsets { ($input: expr, $output:expr, $offset:expr, $result:expr) => { let offsets = $output.get_offsets()[$offset]; assert_eq!(&$input[offsets.0..offsets.1], $result); }; } #[test] fn byte_level_basic() { // Without trimming offsets let tokenizer = get_byte_level(true, false); let input = "Hello there, how are you?"; let output = tokenizer.encode(input, false).unwrap(); check_offsets!(input, output, 0, "Hello"); check_offsets!(input, output, 1, " there"); check_offsets!(input, output, 2, ","); check_offsets!(input, output, 3, " how"); check_offsets!(input, output, 4, " are"); check_offsets!(input, output, 5, " you"); check_offsets!(input, output, 6, "?"); // And when trimming offsets: let tokenizer = get_byte_level(true, true); let input = "Hello there, how are you?"; let output = tokenizer.encode(input, false).unwrap(); check_offsets!(input, output, 0, "Hello"); check_offsets!(input, output, 1, "there"); check_offsets!(input, output, 2, ","); check_offsets!(input, output, 3, "how"); check_offsets!(input, output, 4, "are"); check_offsets!(input, output, 5, "you"); check_offsets!(input, output, 6, "?"); } #[test] fn byte_level_unicode() { let tokenizer = get_byte_level(true, false); let input = "i⭢j"; let output = tokenizer.encode(input, false).unwrap(); check_offsets!(input, output, 1, "⭢"); check_offsets!(input, output, 2, "⭢"); check_offsets!(input, output, 3, "⭢"); } #[test] fn byte_level_double_sequence() { let input_a = "My name is Anthony"; let input_b = "What is my name?"; // Without trimming offsets let tokenizer = get_byte_level(true, false); let output = tokenizer.encode((input_a, input_b), false).unwrap(); let offsets = output.get_offsets(); assert_eq!( offsets, &[ (0, 2), (2, 7), (7, 10), (10, 18), (0, 4), (4, 7), (7, 10), (10, 15), (15, 16) ] ); assert_eq!( output.get_word_ids(), &[ Some(0), Some(1), Some(2), Some(3), Some(0), Some(1), Some(2), Some(3), Some(4) ] ); assert_eq!(output.get_type_ids(), &[0, 0, 0, 0, 1, 1, 1, 1, 1]); // When trimming offsets let tokenizer = get_byte_level(true, true); let output = tokenizer.encode((input_a, input_b), false).unwrap(); let offsets = output.get_offsets(); assert_eq!( offsets, &[ (0, 2), (3, 7), (8, 10), (11, 18), (0, 4), (5, 7), (8, 10), (11, 15), (15, 16) ] ); } #[test] fn byte_level_pre_tokenized_sequence() { let input = ["My", "name", "is", "Anthonino"]; // Without trimming offsets let tokenizer = get_byte_level(true, false); let output = tokenizer.encode(&input[..], false).unwrap(); assert_eq!( output.get_tokens(), &["ĠMy", "Ġname", "Ġis", "ĠAnth", "on", "ino"] ); assert_eq!( output.get_word_ids(), &[Some(0), Some(1), Some(2), Some(3), Some(3), Some(3)] ); assert_eq!( output.get_offsets(), &[(0, 2), (0, 4), (0, 2), (0, 4), (4, 6), (6, 9)] ); } #[test] #[ignore] fn byte_level_pre_tokenized_sequence_with_trimming() { let input = ["My", "name", "is", "Anthonino"]; // When trimming offsets (expect same result) let tokenizer = get_byte_level(true, true); let output = tokenizer.encode(&input[..], false).unwrap(); assert_eq!( output.get_word_ids(), &[Some(0), Some(1), Some(2), Some(3), Some(3), Some(3)] ); assert_eq!( output.get_offsets(), &[(0, 2), (0, 4), (0, 2), (0, 4), (4, 6), (6, 9)] ); } #[test] fn split_on_added_tokens_bert() { let input = "Yesterday I saw a [MASK] far away"; let mut tokenizer = get_bert(); tokenizer.add_special_tokens(&[AddedToken::from("[MASK]", true)]); let output = tokenizer.encode(input, false).unwrap(); assert_eq!( 
output.get_offsets(), &[ (0, 9), (10, 11), (12, 15), (16, 17), (18, 24), (25, 28), (29, 33) ] ); assert_eq!( output.get_tokens(), &["yesterday", "i", "saw", "a", "[MASK]", "far", "away"] ); assert_eq!( output.get_word_ids(), &[ Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6) ] ); }
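A minimal sketch of the invariant the `check_offsets!` macro relies on: every `(start, end)` pair from `get_offsets` is a byte range into the original input. The helper below is illustrative, not part of the test suite.

use tokenizers::Tokenizer;

fn show_token_spans(tokenizer: &Tokenizer, input: &str) -> tokenizers::Result<()> {
    let encoding = tokenizer.encode(input, false)?;
    for (token, (start, end)) in encoding.get_tokens().iter().zip(encoding.get_offsets()) {
        // Each offset pair slices straight back into `input`.
        println!("{:>10} -> {:?}", token, &input[*start..*end]);
    }
    Ok(())
}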
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/added_tokens.rs
mod common; use common::*; use tokenizers::tokenizer::AddedToken; #[test] fn add_tokens() { let mut tokenizer = get_empty(); assert_eq!( tokenizer.add_special_tokens(&[ AddedToken::from("<cls>", true), AddedToken::from("<sep>", true) ]), 2 ); assert_eq!(tokenizer.token_to_id("<cls>"), Some(0)); assert_eq!(tokenizer.token_to_id("<sep>"), Some(1)); assert_eq!( tokenizer.add_tokens(&[ AddedToken::from("hello", false), AddedToken::from("world", false) ]), 2 ); assert_eq!(tokenizer.token_to_id("hello"), Some(2)); assert_eq!(tokenizer.token_to_id("world"), Some(3)); } #[test] fn lstrip_tokens() { let mut tokenizer = get_byte_level(true, false); tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).lstrip(true)]); let input = "I saw a <mask> 😺"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!( output.get_tokens(), &["ĠI", "Ġsaw", "Ġa", " <mask>", "ĠðŁĺ", "º"] ); assert_eq!( output.get_offsets(), &[(0, 1), (1, 5), (5, 7), (7, 14), (14, 19), (15, 19)] ); } #[test] fn rstrip_tokens() { let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]); let input = "I saw a <mask> 😺"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!( output.get_tokens(), &["I", "Ġsaw", "Ġa", "Ġ", "<mask> ", "ðŁĺ", "º"] ); // When `add_prefix_space = true` rstrip cannot work as a prefix space is added // to the next token let mut tokenizer = get_byte_level(true, false); tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]); let input = "I saw a <mask> 😺"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!( output.get_tokens(), &["ĠI", "Ġsaw", "Ġa", "Ġ", "<mask> ", "ĠðŁĺ", "º"] ); } #[test] fn single_word_tokens() { // If `single_word = true` it shouldn't split `dancing` let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(true)]); let input = "I like dancing"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġdancing"]); // If `single_word = false` it should split `dancing` let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(false)]); let input = "I like dancing"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġd", "anc", "ing"]); } #[test] fn overlapping_tokens() { let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]); tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]); tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]); let input = "I like dancing"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġ", "danc", "ing"]); let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]); tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]); tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]); tokenizer.add_special_tokens(&[AddedToken::from("ike", true)]); let output = tokenizer.encode(input, false).unwrap(); // Breaking change but following `transformers` breaking change. // This behavior is deemed not used in practice: // https://github.com/huggingface/transformers/pull/13220 // Order does NOT matter. 
(We could make it work again but the trie // would need to keep insertion order too) // // assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġda", "nci", "ng"]); assert_eq!(output.get_tokens(), &["I", "Ġl", "ike", "Ġ", "danc", "ing"]); }
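The options exercised above can also be combined on a single token; a hedged sketch of such a configuration (the particular combination is illustrative, not something these tests assert):

use tokenizers::tokenizer::AddedToken;

fn mask_token() -> AddedToken {
    AddedToken::from("<mask>", true)
        .lstrip(true)       // absorb the whitespace before the token
        .rstrip(false)      // leave the following whitespace alone
        .single_word(true)  // only match when surrounded by word boundaries
}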
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/documentation.rs
use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; use tokenizers::normalizers::{Sequence, Strip, NFC}; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::{AddedToken, TokenizerBuilder}; use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper}; use tokenizers::{Tokenizer, TokenizerImpl}; #[test] fn train_tokenizer() { let vocab_size: usize = 100; let mut tokenizer = TokenizerBuilder::new() .with_model(BPE::default()) .with_normalizer(Some(Sequence::new(vec![ Strip::new(true, true).into(), NFC.into(), ]))) .with_pre_tokenizer(Some(ByteLevel::default())) .with_post_processor(Some(ByteLevel::default())) .with_decoder(Some(ByteLevel::default())) .build() .unwrap(); let mut trainer = BpeTrainerBuilder::new() .show_progress(false) .vocab_size(vocab_size) .min_frequency(0) .special_tokens(vec![ AddedToken::from(String::from("<s>"), true), AddedToken::from(String::from("<pad>"), true), AddedToken::from(String::from("</s>"), true), AddedToken::from(String::from("<unk>"), true), AddedToken::from(String::from("<mask>"), true), ]) .build(); let pretty = true; tokenizer .train_from_files(&mut trainer, vec!["data/small.txt".to_string()]) .unwrap() .save("data/tokenizer.json", pretty) .unwrap(); } #[test] fn load_tokenizer() { let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap(); let example = "This is an example"; let ids = vec![713, 16, 41, 1246]; let tokens = vec!["This", "Ġis", "Ġan", "Ġexample"]; let encodings = tokenizer.encode(example, false).unwrap(); assert_eq!(encodings.get_ids(), ids); assert_eq!(encodings.get_tokens(), tokens); let decoded = tokenizer.decode(&ids, false).unwrap(); assert_eq!(decoded, example); } #[test] #[ignore] fn quicktour_slow_train() -> tokenizers::Result<()> { // START quicktour_init_tokenizer use tokenizers::models::bpe::BPE; let mut tokenizer: TokenizerImpl< BPE, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, > = TokenizerImpl::new( BPE::builder() .unk_token("[UNK]".to_string()) .build() .unwrap(), ); // END quicktour_init_tokenizer // START quicktour_init_trainer use tokenizers::models::bpe::BpeTrainer; let mut trainer = BpeTrainer::builder() .special_tokens(vec![ AddedToken::from("[UNK]", true), AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), AddedToken::from("[PAD]", true), AddedToken::from("[MASK]", true), ]) .build(); // END quicktour_init_trainer // START quicktour_init_pretok use tokenizers::pre_tokenizers::whitespace::Whitespace; tokenizer.with_pre_tokenizer(Whitespace {}); // END quicktour_init_pretok // START quicktour_train let files = vec![ "data/wikitext-103-raw/wiki.train.raw".into(), "data/wikitext-103-raw/wiki.test.raw".into(), "data/wikitext-103-raw/wiki.valid.raw".into(), ]; tokenizer.train_from_files(&mut trainer, files)?; // END quicktour_train // START quicktour_save tokenizer.save("data/tokenizer-wiki.json", false)?; // END quicktour_save Ok(()) } #[test] fn quicktour() -> tokenizers::Result<()> { // START quicktour_reload_tokenizer let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?; // END quicktour_reload_tokenizer // START quicktour_encode let output = tokenizer.encode("Hello, y'all! 
How are you 😁 ?", true)?; // END quicktour_encode // START quicktour_print_tokens println!("{:?}", output.get_tokens()); // ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",] // END quicktour_print_tokens assert_eq!( output.get_tokens(), ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",] ); // START quicktour_print_ids println!("{:?}", output.get_ids()); // [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] // END quicktour_print_ids assert_eq!( output.get_ids(), [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] ); // START quicktour_print_offsets println!("{:?}", output.get_offsets()[9]); // (26, 30) // END quicktour_print_offsets assert_eq!(output.get_offsets()[9], (26, 30)); // START quicktour_use_offsets let sentence = "Hello, y'all! How are you 😁 ?"; println!("{}", &sentence[26..30]); // "😁" // END quicktour_use_offsets // START quicktour_check_sep println!("{}", tokenizer.token_to_id("[SEP]").unwrap()); // 2 // END quicktour_check_sep assert_eq!(tokenizer.token_to_id("[SEP]"), Some(2)); // START quicktour_init_template_processing use tokenizers::processors::template::TemplateProcessing; let special_tokens = vec![ ("[CLS]", tokenizer.token_to_id("[CLS]").unwrap()), ("[SEP]", tokenizer.token_to_id("[SEP]").unwrap()), ]; tokenizer.with_post_processor( TemplateProcessing::builder() .try_single("[CLS] $A [SEP]") .unwrap() .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1") .unwrap() .special_tokens(special_tokens) .build()?, ); // END quicktour_init_template_processing // START quicktour_print_special_tokens let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?; println!("{:?}", output.get_tokens()); // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] // END quicktour_print_special_tokens assert_eq!( output.get_tokens(), ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] ); // START quicktour_print_special_tokens_pair let output = tokenizer.encode(("Hello, y'all!", "How are you 😁 ?"), true)?; println!("{:?}", output.get_tokens()); // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"] // END quicktour_print_special_tokens_pair assert_eq!( output.get_tokens(), [ "[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]" ] ); // START quicktour_print_type_ids println!("{:?}", output.get_type_ids()); // [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] // END quicktour_print_type_ids assert_eq!( output.get_type_ids(), [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] ); // START quicktour_encode_batch let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?; // END quicktour_encode_batch println!("{:?}", output); // START quicktour_encode_batch_pair let output = tokenizer.encode_batch( vec![ ("Hello, y'all!", "How are you 😁 ?"), ("Hello to you too!", "I'm fine, thank you!"), ], true, )?; // END quicktour_encode_batch_pair println!("{:?}", output); // START quicktour_enable_padding use tokenizers::PaddingParams; tokenizer.with_padding(Some(PaddingParams { pad_id: 3, pad_token: "[PAD]".to_string(), ..PaddingParams::default() })); // END quicktour_enable_padding // START quicktour_print_batch_tokens let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?; println!("{:?}", output[1].get_tokens()); // ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] // END quicktour_print_batch_tokens assert_eq!( output[1].get_tokens(), 
["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] ); // START quicktour_print_attention_mask println!("{:?}", output[1].get_attention_mask()); // [1, 1, 1, 1, 1, 1, 1, 0] // END quicktour_print_attention_mask assert_eq!(output[1].get_attention_mask(), [1, 1, 1, 1, 1, 1, 1, 0]); Ok(()) } #[test] fn pipeline() -> tokenizers::Result<()> { // START pipeline_reload_tokenizer use tokenizers::Tokenizer; let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?; // END pipeline_reload_tokenizer // START pipeline_setup_normalizer use tokenizers::normalizers::{ strip::StripAccents, unicode::NFD, utils::Sequence as NormalizerSequence, }; let normalizer = NormalizerSequence::new(vec![NFD.into(), StripAccents.into()]); // END pipeline_setup_normalizer // START pipeline_test_normalizer use tokenizers::{NormalizedString, Normalizer}; let mut normalized = NormalizedString::from("Héllò hôw are ü?"); normalizer.normalize(&mut normalized)?; println!("{}", normalized.get()); // "Hello how are u?" // END pipeline_test_normalizer assert_eq!(normalized.get(), "Hello how are u?"); // START pipeline_replace_normalizer tokenizer.with_normalizer(normalizer); // END pipeline_replace_normalizer // START pipeline_setup_pre_tokenizer use tokenizers::pre_tokenizers::whitespace::Whitespace; use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer}; let pre_tokenizer = Whitespace {}; let mut pre_tokenized = PreTokenizedString::from("Hello! How are you? I'm fine, thank you."); pre_tokenizer.pre_tokenize(&mut pre_tokenized)?; println!( "{:?}", pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte) ); // [("Hello", (0, 5), None), ("!", (5, 6), None), ("How", (7, 10), None), // ("are", (11, 14), None), ("you", (15, 18), None), ("?", (18, 19), None), // ("I", (20, 21), None), ("\'", (21, 22), None), ("m", (22, 23), None), // ("fine", (24, 28), None), (",", (28, 29), None), ("thank", (30, 35), None), // ("you", (36, 39), None), (".", (39, 40), None)] // END pipeline_setup_pre_tokenizer assert_eq!( pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte), vec![ ("Hello", (0, 5), &None), ("!", (5, 6), &None), ("How", (7, 10), &None), ("are", (11, 14), &None), ("you", (15, 18), &None), ("?", (18, 19), &None), ("I", (20, 21), &None), ("\'", (21, 22), &None), ("m", (22, 23), &None), ("fine", (24, 28), &None), (",", (28, 29), &None), ("thank", (30, 35), &None), ("you", (36, 39), &None), (".", (39, 40), &None) ] ); // START pipeline_combine_pre_tokenizer use tokenizers::pre_tokenizers::{digits::Digits, sequence::Sequence}; let pre_tokenizer = Sequence::new(vec![Whitespace {}.into(), Digits::new(true).into()]); let mut pre_tokenized = PreTokenizedString::from("Call 911!"); pre_tokenizer.pre_tokenize(&mut pre_tokenized)?; println!( "{:?}", pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte) ); // END pipeline_combine_pre_tokenizer assert_eq!( pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte), vec![ ("Call", (0, 4), &None), ("9", (5, 6), &None), ("1", (6, 7), &None), ("1", (7, 8), &None), ("!", (8, 9), &None) ] ); // START pipeline_replace_pre_tokenizer tokenizer.with_pre_tokenizer(pre_tokenizer); // END pipeline_replace_pre_tokenizer // START pipeline_setup_processor use tokenizers::processors::template::TemplateProcessing; tokenizer.with_post_processor( TemplateProcessing::builder() .try_single("[CLS] $A [SEP]") .unwrap() .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1") .unwrap() .special_tokens(vec![("[CLS]", 1), ("[SEP]", 
2)]) .build() .unwrap(), ); // END pipeline_setup_processor // START pipeline_test_decoding let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?; println!("{:?}", output.get_ids()); // [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] let decoded = tokenizer.decode( &[1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2], true, )?; println!("{}", decoded); // "Hello , y ' all ! How are you ?" // END pipeline_test_decoding Ok(()) } #[test] #[ignore] fn train_pipeline_bert() -> tokenizers::Result<()> { // START bert_setup_tokenizer use tokenizers::models::wordpiece::WordPiece; use tokenizers::Tokenizer; let mut bert_tokenizer = Tokenizer::new( WordPiece::builder() .unk_token("[UNK]".to_string()) .build() .unwrap(), ); // END bert_setup_tokenizer // START bert_setup_normalizer use tokenizers::normalizers::utils::Sequence as NormalizerSequence; use tokenizers::normalizers::{strip::StripAccents, unicode::NFD, utils::Lowercase}; bert_tokenizer.with_normalizer(NormalizerSequence::new(vec![ NFD.into(), Lowercase.into(), StripAccents.into(), ])); // END bert_setup_normalizer // START bert_setup_pre_tokenizer use tokenizers::pre_tokenizers::whitespace::Whitespace; bert_tokenizer.with_pre_tokenizer(Whitespace {}); // END bert_setup_pre_tokenizer // START bert_setup_processor use tokenizers::processors::template::TemplateProcessing; bert_tokenizer.with_post_processor( TemplateProcessing::builder() .try_single("[CLS] $A [SEP]") .unwrap() .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1") .unwrap() .special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)]) .build() .unwrap(), ); // END bert_setup_processor // START bert_train_tokenizer use tokenizers::models::{wordpiece::WordPieceTrainer, TrainerWrapper}; let mut trainer: TrainerWrapper = WordPieceTrainer::builder() .vocab_size(30_522) .special_tokens(vec![ AddedToken::from("[UNK]", true), AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), AddedToken::from("[PAD]", true), AddedToken::from("[MASK]", true), ]) .build() .into(); let files = vec![ "data/wikitext-103-raw/wiki.train.raw".into(), "data/wikitext-103-raw/wiki.test.raw".into(), "data/wikitext-103-raw/wiki.valid.raw".into(), ]; bert_tokenizer.train_from_files(&mut trainer, files)?; bert_tokenizer.save("data/bert-wiki.json", false)?; // END bert_train_tokenizer Ok(()) } #[test] fn pipeline_bert() -> tokenizers::Result<()> { let mut bert_tokenizer = Tokenizer::from_file("data/bert-wiki.json")?; // START bert_test_decoding let output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.", true)?; println!("{:?}", output.get_tokens()); // ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"] let decoded = bert_tokenizer.decode(output.get_ids(), true)?; println!("{}", decoded); // "welcome to the tok ##eni ##zer ##s library ." // END bert_test_decoding assert_eq!( output.get_tokens(), &[ "[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]" ] ); assert_eq!(decoded, "welcome to the tok ##eni ##zer ##s library ."); // START bert_proper_decoding use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder; bert_tokenizer.with_decoder(WordPieceDecoder::default()); let decoded = bert_tokenizer.decode(output.get_ids(), true)?; // "welcome to the tokenizers library." // END bert_proper_decoding assert_eq!(decoded, "welcome to the tokenizers library."); Ok(()) }
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/training.rs
use tokenizers::models::bpe::BPE; use tokenizers::pre_tokenizers::whitespace::Whitespace; use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper}; use tokenizers::{Model, Tokenizer, TokenizerBuilder}; #[test] fn bpe_values_after_training() { let mut tokenizer = TokenizerBuilder::< BPE, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, >::default() .with_model( BPE::builder() .unk_token("[UNK]".to_string()) .dropout(0.1) .build() .unwrap(), ) .build() .unwrap(); let mut trainer = tokenizer.get_model().get_trainer(); tokenizer .train_from_files(&mut trainer, vec!["./data/small.txt".to_string()]) .unwrap(); assert_eq!(tokenizer.get_model().dropout, Some(0.1)); assert_eq!(tokenizer.get_model().unk_token, Some("[UNK]".to_string())); } #[test] fn bpe_continuing_subword_prefix_error() { let mut tokenizer = TokenizerBuilder::< BPE, NormalizerWrapper, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper, >::default() .with_model( BPE::builder() .unk_token("[UNK]".to_string()) .continuing_subword_prefix("##".to_string()) .build() .unwrap(), ) .with_pre_tokenizer(Some(PreTokenizerWrapper::Whitespace(Whitespace {}))) .build() .unwrap(); let mut trainer = tokenizer.get_model().get_trainer(); tokenizer .train_from_files(&mut trainer, vec!["./data/small.txt".to_string()]) .unwrap(); tokenizer.save("tokenizer.json", true).unwrap(); let tokenizer = Tokenizer::from_file("tokenizer.json").unwrap(); assert_eq!(tokenizer.get_vocab_size(false), 1526); std::fs::remove_file("tokenizer.json").unwrap(); }
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/tests/from_pretrained.rs
#![cfg(feature = "http")] use tokenizers::{FromPretrainedParameters, Result, Tokenizer}; #[test] fn test_from_pretrained() -> Result<()> { let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?; let encoding = tokenizer.encode("Hey there dear friend!", false)?; assert_eq!( encoding.get_tokens(), &["Hey", "there", "dear", "friend", "!"] ); Ok(()) } #[test] fn test_from_pretrained_revision() -> Result<()> { let tokenizer = Tokenizer::from_pretrained("anthony/tokenizers-test", None)?; let encoding = tokenizer.encode("Hey there dear friend!", false)?; assert_eq!( encoding.get_tokens(), &["hey", "there", "dear", "friend", "!"] ); let tokenizer = Tokenizer::from_pretrained( "anthony/tokenizers-test", Some(FromPretrainedParameters { revision: "gpt-2".to_string(), ..Default::default() }), )?; let encoding = tokenizer.encode("Hey there dear friend!", false)?; assert_eq!( encoding.get_tokens(), &["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"] ); Ok(()) } #[test] fn test_from_pretrained_invalid_model() { let tokenizer = Tokenizer::from_pretrained("docs?", None); assert!(tokenizer.is_err()); } #[test] fn test_from_pretrained_invalid_revision() { let tokenizer = Tokenizer::from_pretrained( "bert-base-cased", Some(FromPretrainedParameters { revision: "gpt?".to_string(), ..Default::default() }), ); assert!(tokenizer.is_err()); }
0
hf_public_repos/tokenizers/tokenizers/tests
hf_public_repos/tokenizers/tokenizers/tests/common/mod.rs
use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder; use tokenizers::models::bpe::BPE; use tokenizers::models::wordpiece::WordPiece; use tokenizers::normalizers::bert::BertNormalizer; use tokenizers::pre_tokenizers::bert::BertPreTokenizer; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::processors::bert::BertProcessing; use tokenizers::tokenizer::{Model, Tokenizer}; #[allow(dead_code)] pub fn get_empty() -> Tokenizer { Tokenizer::new(BPE::default()) } #[allow(dead_code)] pub fn get_byte_level_bpe() -> BPE { BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt") .build() .expect("Files not found, run `make test` to download these files") } #[allow(dead_code)] pub fn get_byte_level(add_prefix_space: bool, trim_offsets: bool) -> Tokenizer { let mut tokenizer = Tokenizer::new(get_byte_level_bpe()); tokenizer .with_pre_tokenizer(ByteLevel::default().add_prefix_space(add_prefix_space)) .with_decoder(ByteLevel::default()) .with_post_processor(ByteLevel::default().trim_offsets(trim_offsets)); tokenizer } #[allow(dead_code)] pub fn get_bert_wordpiece() -> WordPiece { WordPiece::from_file("data/bert-base-uncased-vocab.txt") .build() .expect("Files not found, run `make test` to download these files") } #[allow(dead_code)] pub fn get_bert() -> Tokenizer { let mut tokenizer = Tokenizer::new(get_bert_wordpiece()); let sep = tokenizer.get_model().token_to_id("[SEP]").unwrap(); let cls = tokenizer.get_model().token_to_id("[CLS]").unwrap(); tokenizer .with_normalizer(BertNormalizer::default()) .with_pre_tokenizer(BertPreTokenizer) .with_decoder(WordPieceDecoder::default()) .with_post_processor(BertProcessing::new( (String::from("[SEP]"), sep), (String::from("[CLS]"), cls), )); tokenizer }
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/src/lib.rs
#![warn(clippy::all)] #![allow(clippy::upper_case_acronyms)] #![doc(html_favicon_url = "https://huggingface.co/favicon.ico")] #![doc(html_logo_url = "https://huggingface.co/landing/assets/huggingface_logo.svg")] //! The core of `tokenizers`, written in Rust. //! Provides an implementation of today's most used tokenizers, with a focus on performance and //! versatility. //! //! # What is a Tokenizer //! //! A Tokenizer works as a pipeline, it processes some raw text as input and outputs an `Encoding`. //! The various steps of the pipeline are: //! //! 1. The `Normalizer`: in charge of normalizing the text. Common examples of normalization are //! the [unicode normalization standards](https://unicode.org/reports/tr15/#Norm_Forms), such as `NFD` or `NFKC`. //! More details about how to use the `Normalizers` are available on the //! [Hugging Face blog](https://huggingface.co/docs/tokenizers/components#normalizers) //! 2. The `PreTokenizer`: in charge of creating initial words splits in the text. The most common way of //! splitting text is simply on whitespace. //! 3. The `Model`: in charge of doing the actual tokenization. An example of a `Model` would be //! `BPE` or `WordPiece`. //! 4. The `PostProcessor`: in charge of post-processing the `Encoding` to add anything relevant //! that, for example, a language model would need, such as special tokens. //! //! ## Loading a pretrained tokenizer from the Hub //! ``` //! use tokenizers::tokenizer::{Result, Tokenizer}; //! //! fn main() -> Result<()> { //! # #[cfg(feature = "http")] //! # { //! let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?; //! //! let encoding = tokenizer.encode("Hey there!", false)?; //! println!("{:?}", encoding.get_tokens()); //! # } //! Ok(()) //! } //! ``` //! //! ## Deserialization and tokenization example //! //! ```no_run //! use tokenizers::tokenizer::{Result, Tokenizer, EncodeInput}; //! use tokenizers::models::bpe::BPE; //! //! fn main() -> Result<()> { //! let bpe_builder = BPE::from_file("./path/to/vocab.json", "./path/to/merges.txt"); //! let bpe = bpe_builder //! .dropout(0.1) //! .unk_token("[UNK]".into()) //! .build()?; //! //! let mut tokenizer = Tokenizer::new(bpe); //! //! let encoding = tokenizer.encode("Hey there!", false)?; //! println!("{:?}", encoding.get_tokens()); //! //! Ok(()) //! } //! ``` //! //! ## Training and serialization example //! //! ```no_run //! use tokenizers::decoders::DecoderWrapper; //! use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; //! use tokenizers::normalizers::{strip::Strip, unicode::NFC, utils::Sequence, NormalizerWrapper}; //! use tokenizers::pre_tokenizers::byte_level::ByteLevel; //! use tokenizers::pre_tokenizers::PreTokenizerWrapper; //! use tokenizers::processors::PostProcessorWrapper; //! use tokenizers::{AddedToken, Model, Result, TokenizerBuilder}; //! //! use std::path::Path; //! //! fn main() -> Result<()> { //! let vocab_size: usize = 100; //! //! let mut trainer = BpeTrainerBuilder::new() //! .show_progress(true) //! .vocab_size(vocab_size) //! .min_frequency(0) //! .special_tokens(vec![ //! AddedToken::from(String::from("<s>"), true), //! AddedToken::from(String::from("<pad>"), true), //! AddedToken::from(String::from("</s>"), true), //! AddedToken::from(String::from("<unk>"), true), //! AddedToken::from(String::from("<mask>"), true), //! ]) //! .build(); //! //! let mut tokenizer = TokenizerBuilder::new() //! .with_model(BPE::default()) //! .with_normalizer(Some(Sequence::new(vec![ //! Strip::new(true, true).into(), //! NFC.into(), //! 
]))) //! .with_pre_tokenizer(Some(ByteLevel::default())) //! .with_post_processor(Some(ByteLevel::default())) //! .with_decoder(Some(ByteLevel::default())) //! .build()?; //! //! let pretty = false; //! tokenizer //! .train_from_files( //! &mut trainer, //! vec!["path/to/vocab.txt".to_string()], //! )? //! .save("tokenizer.json", pretty)?; //! //! Ok(()) //! } //! ``` //! //! # Additional information //! //! - tokenizers is designed to leverage CPU parallelism when possible. The level of parallelism is determined //! by the total number of core/threads your CPU provides but this can be tuned by setting the `RAYON_RS_NUM_THREADS` //! environment variable. As an example setting `RAYON_RS_NUM_THREADS=4` will allocate a maximum of 4 threads. //! **_Please note this behavior may evolve in the future_** //! //! # Features //! **progressbar**: The progress bar visualization is enabled by default. It might be disabled if //! compilation for certain targets is not supported by the [termios](https://crates.io/crates/termios) //! dependency of the [indicatif](https://crates.io/crates/indicatif) progress bar. #[macro_use] extern crate log; #[macro_use] extern crate lazy_static; #[macro_use] extern crate derive_builder; #[macro_use] pub mod utils; pub mod decoders; pub mod models; pub mod normalizers; pub mod pre_tokenizers; pub mod processors; pub mod tokenizer; // Re-export from tokenizer pub use tokenizer::*; // Re-export also parallelism utils pub use utils::parallelism; // Re-export for from_pretrained #[cfg(feature = "http")] pub use utils::from_pretrained::FromPretrainedParameters;
0
hf_public_repos/tokenizers/tokenizers
hf_public_repos/tokenizers/tokenizers/src/cli.rs
//! //! This is the CLI binary for the Tokenizers project //! use clap::{Parser, Subcommand}; use std::io::{self, BufRead, Write}; use tokenizers::models::bpe::BPE; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::tokenizer::{AddedToken, Result}; use tokenizers::Tokenizer; /// Generate custom Tokenizers or use existing ones #[derive(Parser, Debug)] #[command(author, version)] struct Args { #[command(subcommand)] command: Command, } #[derive(Subcommand, Debug)] enum Command { Shell { /// Path to the vocab.json file vocab: String, /// Path to the merges.txt file merges: String, }, } fn shell(vocab: &str, merges: &str) -> Result<()> { let bpe = BPE::from_file(vocab, merges).build()?; let mut tokenizer = Tokenizer::new(bpe); tokenizer .with_pre_tokenizer(ByteLevel::default()) .with_decoder(ByteLevel::default()); tokenizer.add_tokens(&[AddedToken::from(String::from("ing"), false).single_word(false)]); tokenizer .add_special_tokens(&[AddedToken::from(String::from("[ENT]"), true).single_word(true)]); let stdin = io::stdin(); let mut handle = stdin.lock(); let mut buffer = String::new(); loop { buffer.clear(); print!("\nEnter some text to tokenize:\n> "); io::stdout().flush()?; handle.read_line(&mut buffer)?; let buffer = buffer.trim_end(); let timer = std::time::Instant::now(); let encoded = tokenizer.encode(buffer.to_owned(), false)?; let elapsed = timer.elapsed(); println!("\nInput:\t\t{}", buffer); println!("Tokens:\t\t{:?}", encoded.get_tokens()); println!("IDs:\t\t{:?}", encoded.get_ids()); println!("Offsets:\t{:?}", encoded.get_offsets()); println!( "Decoded:\t{}", tokenizer.decode(encoded.get_ids(), true).unwrap() ); println!("Tokenized in {:?}", elapsed); } } fn main() -> Result<()> { let args = Args::parse(); match args.command { Command::Shell { vocab, merges } => shell(&vocab, &merges), } }
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/precompiled.rs
use crate::tokenizer::{NormalizedString, Normalizer, Result};
pub use spm_precompiled::Precompiled;
use std::cmp::Ordering;
use unicode_segmentation::UnicodeSegmentation;

fn replace(transformations: &mut Vec<(char, isize)>, old_part: &str, new_part: &str) {
    let old_count = old_part.chars().count() as isize;
    let new_count = new_part.chars().count() as isize;
    let diff = new_count - old_count;

    // If we are just replacing characters, all changes should be == 0
    transformations.extend(new_part.chars().map(|c| (c, 0)));

    match diff.cmp(&0) {
        // If we are adding some characters, the last DIFF characters should be == 1
        Ordering::Greater => {
            transformations
                .iter_mut()
                .rev()
                .take(diff as usize)
                .for_each(|(_, cs)| *cs = 1);
        }
        // If we are removing some characters, the last one should include the diff
        Ordering::Less => {
            if let Some((_, cs)) = transformations.last_mut() {
                *cs += diff;
            }
        }
        _ => {}
    }
}

impl Normalizer for Precompiled {
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        let mut transformations = Vec::with_capacity(normalized.get().len());
        // Future reader. From @Narsil.
        // Yes, this is weird,
        // Yes, this seems broken
        // No, I don't know why Google did this.
        // If you question this code, check this normalizer against
        // XNLI database (all languages) with Unigram model against
        // Mbart, XLMRoberta *AND* Marian. If you don't get 100% or
        // break a single test.
        // You don't pass.
        let mut modified = false;
        normalized.get().graphemes(true).for_each(|grapheme| {
            if grapheme.len() < 6 {
                if let Some(norm) = self.transform(grapheme) {
                    modified = true;
                    replace(&mut transformations, grapheme, norm);
                    return;
                }
            }
            for (char_index, c) in grapheme.char_indices() {
                let part = &grapheme[char_index..char_index + c.len_utf8()];
                if let Some(norm) = self.transform(part) {
                    modified = true;
                    replace(&mut transformations, part, norm);
                } else {
                    transformations.push((c, 0));
                }
            }
        });
        if modified {
            normalized.transform(transformations.into_iter(), 0);
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn expansion_followed_by_removal() {
        // Simulate transformations from "™\x1eg" to "TMg"
        let mut transformations = vec![];

        let mut n = NormalizedString::from("™\x1eg");

        replace(&mut transformations, "™", "TM");
        replace(&mut transformations, "\x1e", "");
        transformations.push(('g', 0));

        n.transform(transformations.into_iter(), 0);

        assert_eq!(n.get(), "TMg");
    }
}
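For the "same length" branch of `replace` (which the existing test does not exercise on its own), a minimal sketch under the same assumptions as the test above; the module name and input string are illustrative.

#[cfg(test)]
mod replace_sketch {
    use super::*;

    #[test]
    fn one_for_one_replacement() {
        // One char in, one char out: every recorded change stays at 0.
        let mut transformations = vec![];
        let mut n = NormalizedString::from("ｱb");

        // Pretend the precompiled map turned half-width "ｱ" into "ア".
        replace(&mut transformations, "ｱ", "ア");
        transformations.push(('b', 0));

        n.transform(transformations.into_iter(), 0);
        assert_eq!(n.get(), "アb");
    }
}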
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/prepend.rs
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
pub struct Prepend {
    pub prepend: String,
}

impl Prepend {
    pub fn new(prepend: String) -> Self {
        Self { prepend }
    }
}

impl Normalizer for Prepend {
    /// Prepend the configured string to the normalized string in place
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        if !normalized.is_empty() {
            normalized.prepend(&self.prepend);
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_prepend() {
        let original = "Hello";
        let normalized = "▁Hello";
        assert_ne!(original, normalized);
        let mut n = NormalizedString::from(original);
        let prepend = Prepend::new("▁".to_string());
        prepend.normalize(&mut n).unwrap();
        assert_eq!(&n.get(), &normalized);
        assert_eq!(
            n,
            NormalizedString::new(
                original.to_string(),
                normalized.to_string(),
                vec![
                    (0, 1),
                    (0, 1),
                    (0, 1),
                    (0, 1),
                    (1, 2),
                    (2, 3),
                    (3, 4),
                    (4, 5)
                ],
                0
            )
        );
        assert_eq!(
            n.alignments_original(),
            vec![(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)]
        );
    }
}
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/mod.rs
pub mod bert; pub mod precompiled; pub mod prepend; pub mod replace; pub mod strip; pub mod unicode; pub mod utils; pub use crate::normalizers::bert::BertNormalizer; pub use crate::normalizers::precompiled::Precompiled; pub use crate::normalizers::prepend::Prepend; pub use crate::normalizers::replace::Replace; pub use crate::normalizers::strip::{Strip, StripAccents}; pub use crate::normalizers::unicode::{Nmt, NFC, NFD, NFKC, NFKD}; pub use crate::normalizers::utils::{Lowercase, Sequence}; use serde::{Deserialize, Serialize}; use crate::{NormalizedString, Normalizer}; /// Wrapper for known Normalizers. #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(untagged)] pub enum NormalizerWrapper { BertNormalizer(BertNormalizer), StripNormalizer(Strip), StripAccents(StripAccents), NFC(NFC), NFD(NFD), NFKC(NFKC), NFKD(NFKD), Sequence(Sequence), Lowercase(Lowercase), Nmt(Nmt), Precompiled(Precompiled), Replace(Replace), Prepend(Prepend), } impl Normalizer for NormalizerWrapper { fn normalize(&self, normalized: &mut NormalizedString) -> crate::Result<()> { match self { Self::BertNormalizer(bn) => bn.normalize(normalized), Self::StripNormalizer(sn) => sn.normalize(normalized), Self::StripAccents(sn) => sn.normalize(normalized), Self::NFC(nfc) => nfc.normalize(normalized), Self::NFD(nfd) => nfd.normalize(normalized), Self::NFKC(nfkc) => nfkc.normalize(normalized), Self::NFKD(nfkd) => nfkd.normalize(normalized), Self::Sequence(sequence) => sequence.normalize(normalized), Self::Lowercase(lc) => lc.normalize(normalized), Self::Nmt(lc) => lc.normalize(normalized), Self::Precompiled(lc) => lc.normalize(normalized), Self::Replace(lc) => lc.normalize(normalized), Self::Prepend(lc) => lc.normalize(normalized), } } } impl_enum_from!(BertNormalizer, NormalizerWrapper, BertNormalizer); impl_enum_from!(NFKD, NormalizerWrapper, NFKD); impl_enum_from!(NFKC, NormalizerWrapper, NFKC); impl_enum_from!(NFC, NormalizerWrapper, NFC); impl_enum_from!(NFD, NormalizerWrapper, NFD); impl_enum_from!(Strip, NormalizerWrapper, StripNormalizer); impl_enum_from!(StripAccents, NormalizerWrapper, StripAccents); impl_enum_from!(Sequence, NormalizerWrapper, Sequence); impl_enum_from!(Lowercase, NormalizerWrapper, Lowercase); impl_enum_from!(Nmt, NormalizerWrapper, Nmt); impl_enum_from!(Precompiled, NormalizerWrapper, Precompiled); impl_enum_from!(Replace, NormalizerWrapper, Replace); impl_enum_from!(Prepend, NormalizerWrapper, Prepend);
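A minimal sketch of how the `impl_enum_from!` conversions above are typically used: any concrete normalizer converts into the wrapper, which then dispatches `normalize`. The example string is illustrative.

use tokenizers::normalizers::{Lowercase, NormalizerWrapper};
use tokenizers::{NormalizedString, Normalizer};

fn wrapped_lowercase() -> tokenizers::Result<String> {
    // `From<Lowercase> for NormalizerWrapper` comes from `impl_enum_from!`.
    let wrapper: NormalizerWrapper = Lowercase.into();

    let mut normalized = NormalizedString::from("Héllo WORLD");
    wrapper.normalize(&mut normalized)?;
    Ok(normalized.get().to_string())
}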
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/unicode.rs
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFD; impl Normalizer for NFD { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfd(); Ok(()) } } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFKD; impl Normalizer for NFKD { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfkd(); Ok(()) } } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFC; impl Normalizer for NFC { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfc(); Ok(()) } } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFKC; impl Normalizer for NFKC { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfkc(); Ok(()) } } fn do_nmt(normalized: &mut NormalizedString) { // Ascii Control characters normalized .filter(|c| { !matches!( c as u32, 0x0001..=0x0008 | 0x000B | 0x000E..=0x001F | 0x007F | 0x008F | 0x009F ) }) // Other code points considered as whitespace. .map(|c| match c as u32 { 0x0009 => ' ', 0x000A => ' ', 0x000C => ' ', 0x000D => ' ', 0x1680 => ' ', 0x200B..=0x200F => ' ', 0x2028 => ' ', 0x2029 => ' ', 0x2581 => ' ', 0xFEFF => ' ', 0xFFFD => ' ', _ => c, }); } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct Nmt; impl Normalizer for Nmt { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { do_nmt(normalized); Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_nfkc() { let original = "\u{fb01}".to_string(); let normalized = "fi".to_string(); let mut n = NormalizedString::from(original.clone()); NFKC.normalize(&mut n).unwrap(); assert_eq!( n, NormalizedString::new(original, normalized, vec![(0, 3), (0, 3)], 0) ); assert_eq!(n.alignments_original(), vec![(0, 2), (0, 2), (0, 2)]); } }
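A hedged sketch of what `do_nmt` does in practice, derived from the match arms above: control characters are dropped and the listed code points become plain spaces. The input string and expected output are illustrative.

use tokenizers::normalizers::Nmt;
use tokenizers::{NormalizedString, Normalizer};

fn nmt_example() -> tokenizers::Result<()> {
    // "\u{200B}" (zero-width space) maps to ' ', "\u{0001}" is filtered out.
    let mut n = NormalizedString::from("a\u{200B}b\u{0001}c");
    Nmt.normalize(&mut n)?;
    assert_eq!(n.get(), "a bc");
    Ok(())
}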
0
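A minimal sketch of using these Unicode normalizers from outside the crate (mine, not from the dump; the paths assume the `pub use` re-exports in `normalizers/mod.rs`). They are stateless unit structs, so applying one is just a `normalize` call:

use tokenizers::normalizers::{Nmt, NFKC};
use tokenizers::{NormalizedString, Normalizer};

fn main() -> tokenizers::Result<()> {
    // NFKC folds the "fi" ligature (U+FB01) into the two characters "fi".
    let mut n = NormalizedString::from("\u{fb01}");
    NFKC.normalize(&mut n)?;
    assert_eq!(n.get(), "fi");

    // Nmt maps exotic whitespace (here U+2581, the SentencePiece lower block)
    // to a plain space and filters out ASCII control characters.
    let mut m = NormalizedString::from("a\u{2581}b");
    Nmt.normalize(&mut m)?;
    assert_eq!(m.get(), "a b");
    Ok(())
}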
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/utils.rs
use serde::{Deserialize, Serialize}; use crate::normalizers::NormalizerWrapper; use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; #[derive(Clone, Deserialize, Debug, Serialize)] #[serde(tag = "type")] /// Allows concatenating multiple other Normalizer as a Sequence. /// All the normalizers run in sequence in the given order against the same NormalizedString. pub struct Sequence { normalizers: Vec<NormalizerWrapper>, } impl Sequence { pub fn new(normalizers: Vec<NormalizerWrapper>) -> Self { Self { normalizers } } pub fn get_normalizers(&self) -> &[NormalizerWrapper] { &self.normalizers } pub fn get_normalizers_mut(&mut self) -> &mut [NormalizerWrapper] { &mut self.normalizers } } impl Normalizer for Sequence { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { for normalizer in &self.normalizers { normalizer.normalize(normalized)?; } Ok(()) } } /// Lowercases the input #[derive(Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct Lowercase; impl Normalizer for Lowercase { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.lowercase(); Ok(()) } }
0
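A short sketch (my own, under the same re-export assumptions as above) of chaining normalizers with `Sequence`: each wrapped normalizer runs in order against the same `NormalizedString`, and the chain stays inspectable afterwards through `get_normalizers()`:

use tokenizers::normalizers::{Lowercase, Sequence, NFD};
use tokenizers::{NormalizedString, Normalizer};

fn main() -> tokenizers::Result<()> {
    let seq = Sequence::new(vec![NFD.into(), Lowercase.into()]);
    assert_eq!(seq.get_normalizers().len(), 2);

    let mut n = NormalizedString::from("ÉLAN");
    seq.normalize(&mut n)?;
    // NFD decomposes "É" into "E" plus a combining acute accent (U+0301);
    // Lowercase then lowercases the base letters and leaves the mark alone.
    assert_eq!(n.get(), "e\u{301}lan");
    Ok(())
}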
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/replace.rs
use crate::tokenizer::pattern::Pattern; use crate::tokenizer::Decoder; use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::SysRegex; use serde::{Deserialize, Serialize}; /// Represents the different patterns that `Replace` can use #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub enum ReplacePattern { String(String), Regex(String), } impl From<String> for ReplacePattern { fn from(v: String) -> Self { Self::String(v) } } impl From<&str> for ReplacePattern { fn from(v: &str) -> Self { Self::String(v.to_owned()) } } /// We use this custom deserializer to provide the value for `regex` for `Replace` #[doc(hidden)] #[derive(Deserialize)] #[serde(tag = "type")] struct ReplaceDeserializer { pattern: ReplacePattern, content: String, } impl std::convert::TryFrom<ReplaceDeserializer> for Replace { type Error = Box<dyn std::error::Error + Send + Sync>; fn try_from(v: ReplaceDeserializer) -> Result<Self> { Self::new(v.pattern, v.content) } } /// This normalizer will take a `pattern` (for now only a String) /// and replace every occurrence with `content`. #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "type", try_from = "ReplaceDeserializer")] pub struct Replace { pattern: ReplacePattern, content: String, #[serde(skip)] regex: SysRegex, } impl Clone for Replace { fn clone(&self) -> Self { Self::new(self.pattern.clone(), &self.content).unwrap() } } impl PartialEq for Replace { fn eq(&self, other: &Self) -> bool { self.pattern == other.pattern && self.content == other.content } } impl Replace { pub fn new<I: Into<ReplacePattern>, C: Into<String>>(pattern: I, content: C) -> Result<Self> { let pattern: ReplacePattern = pattern.into(); let regex = match &pattern { ReplacePattern::String(s) => SysRegex::new(&regex::escape(s))?, ReplacePattern::Regex(r) => SysRegex::new(r)?, }; Ok(Self { pattern, content: content.into(), regex, }) } } impl Normalizer for Replace { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.replace(&self.regex, &self.content) } } impl Decoder for Replace { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { tokens .into_iter() .map(|token| -> Result<String> { let mut new_token = "".to_string(); for ((start, stop), is_match) in (&self.regex).find_matches(&token)? 
{ if is_match { new_token.push_str(&self.content); } else { new_token.push_str(&token[start..stop]); } } Ok(new_token) }) .collect() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_replace() { let original = "This is a ''test''"; let normalized = "This is a \"test\""; let mut n = NormalizedString::from(original); Replace::new("''", "\"").unwrap().normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_replace_regex() { let original = "This is a test"; let normalized = "This is a test"; let mut n = NormalizedString::from(original); Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ') .unwrap() .normalize(&mut n) .unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn serialization() { let replace = Replace::new("Hello", "Hey").unwrap(); let replace_s = r#"{"type":"Replace","pattern":{"String":"Hello"},"content":"Hey"}"#; assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s); assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace); let replace = Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ').unwrap(); let replace_s = r#"{"type":"Replace","pattern":{"Regex":"\\s+"},"content":" "}"#; assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s); assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace); } #[test] fn test_replace_decode() { let original = vec!["hello".to_string(), "_hello".to_string()]; let replace = Replace::new("_", " ").unwrap(); assert_eq!( replace.decode_chain(original).unwrap(), vec!["hello", " hello"] ); } }
0
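A usage sketch mirroring the unit tests above (mine, not from the repo): `Replace` implements both `Normalizer` and `Decoder`, so the same pattern/content pair can rewrite raw input text or post-process decoded tokens. `ReplacePattern` is reached through the `replace` module path here because only `Replace` itself is re-exported at the `normalizers` root:

use tokenizers::normalizers::replace::{Replace, ReplacePattern};
use tokenizers::{Decoder, NormalizedString, Normalizer};

fn main() -> tokenizers::Result<()> {
    // As a normalizer: collapse runs of whitespace via a regex pattern.
    let collapse = Replace::new(ReplacePattern::Regex(r"\s+".into()), " ")?;
    let mut n = NormalizedString::from("This  is   a test");
    collapse.normalize(&mut n)?;
    assert_eq!(n.get(), "This is a test");

    // As a decoder: rewrite a marker character inside each decoded token.
    let underscore = Replace::new("_", " ")?;
    let decoded = underscore.decode_chain(vec!["_hello".into(), "world".into()])?;
    assert_eq!(decoded, vec![" hello", "world"]);
    Ok(())
}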
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/strip.rs
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; use unicode_normalization_alignments::char::is_combining_mark; #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[non_exhaustive] pub struct Strip { pub strip_left: bool, pub strip_right: bool, } impl Strip { pub fn new(strip_left: bool, strip_right: bool) -> Self { Self { strip_left, strip_right, } } } impl Normalizer for Strip { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { if self.strip_left && self.strip_right { // Fast path normalized.strip(); } else { if self.strip_left { normalized.lstrip(); } if self.strip_right { normalized.rstrip(); } } Ok(()) } } // This normalizer removes combining marks from a normalized string // It's different from unidecode as it does not attempt to modify // non ascii languages. #[derive(Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct StripAccents; impl Normalizer for StripAccents { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.filter(|c| !is_combining_mark(c)); Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::normalizer::NormalizedString; use crate::normalizers::Lowercase; use crate::normalizers::NFKD; use unicode_normalization_alignments::UnicodeNormalization; #[test] fn test_strip_accents() { // Unicode combining char let original: String = "Me llamó".nfkd().map(|(c, _)| c).collect(); let normalized = "Me llamo"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); // Ignores regular ascii let original = "Me llamo"; let normalized = "Me llamo"; assert_eq!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); // Does not change chinese let original: String = "这很简单".nfkd().map(|(c, _)| c).collect(); let normalized = "这很简单"; assert_eq!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_vietnamese_bug() { let original: String = "ậ…".to_string(); let normalized = "a...".to_string(); assert_ne!(original, normalized); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); let original: String = "Cụ thể, bạn sẽ tham gia một nhóm các giám đốc điều hành tổ chức, các nhà lãnh đạo doanh nghiệp, các học giả, chuyên gia phát triển và tình nguyện viên riêng biệt trong lĩnh vực phi lợi nhuận…".to_string(); let normalized = "cu the, ban se tham gia mot nhom cac giam đoc đieu hanh to chuc, cac nha lanh đao doanh nghiep, cac hoc gia, chuyen gia phat trien va tinh nguyen vien rieng biet trong linh vuc phi loi nhuan...".to_string(); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_thai_bug() { let original = "ำน\u{e49}ำ3ลำ".to_string(); let normalized = "านา3ลา".to_string(); assert_ne!(original, normalized); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); 
StripAccents.normalize(&mut n).unwrap(); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_strip_accents_multiple() { let original = "e\u{304}\u{304}\u{304}o"; let normalized = "eo"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); assert_eq!( n, NormalizedString::new( original.to_string(), normalized.to_string(), vec![(0, 1), (7, 8)], 0 ) ); assert_eq!( n.alignments_original(), vec![ (0, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 2) ] ); } }
0
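A minimal sketch (assumed usage, not from the dump): `Strip` trims whitespace on whichever sides were requested, while `StripAccents` only removes combining marks, so accented letters must be decomposed (NFD/NFKD) first for it to have any effect:

use tokenizers::normalizers::{Strip, StripAccents, NFD};
use tokenizers::{NormalizedString, Normalizer};

fn main() -> tokenizers::Result<()> {
    // Strip only the left side.
    let mut n = NormalizedString::from("  hello  ");
    Strip::new(true, false).normalize(&mut n)?;
    assert_eq!(n.get(), "hello  ");

    // Decompose, then drop the combining marks.
    let mut m = NormalizedString::from("café");
    NFD.normalize(&mut m)?;
    StripAccents.normalize(&mut m)?;
    assert_eq!(m.get(), "cafe");
    Ok(())
}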
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/normalizers/bert.rs
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use serde::{Deserialize, Serialize}; use unicode_categories::UnicodeCategories; /// Checks whether a character is whitespace fn is_whitespace(c: char) -> bool { // These are technically control characters but we count them as whitespace match c { '\t' | '\n' | '\r' => true, _ => c.is_whitespace(), } } /// Checks whether a character is a control character fn is_control(c: char) -> bool { // These are technically control characters but we count them as whitespace match c { '\t' | '\n' | '\r' => false, // The definition of `is_control` here is quite large and contains also // Cc, Cf, Cn or Co // cf. https://unicode.org/reports/tr44/ (Table 12) _ => c.is_other(), } } /// Checks whether a character is chinese /// This defines a "chinese character" as anything in the CJK Unicode block: /// https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) /// /// Note that the CJK Unicode block is NOT all Japanese and Korean characters, /// despite its name. The modern Korean Hangul alphabet is a different block, /// as is Japanese Hiragana and Katakana. Those alphabets are used to write /// space-separated words, so they are not treated specially and handled /// like for all of the other languages. fn is_chinese_char(c: char) -> bool { matches!( c as usize, 0x4E00..=0x9FFF | 0x3400..=0x4DBF | 0x20000..=0x2A6DF | 0x2A700..=0x2B73F | 0x2B740..=0x2B81F | 0x2B920..=0x2CEAF | 0xF900..=0xFAFF | 0x2F800..=0x2FA1F ) } #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[non_exhaustive] pub struct BertNormalizer { /// Whether to do the bert basic cleaning: /// 1. Remove any control characters /// 2. Replace all sorts of whitespace by the classic one ` ` pub clean_text: bool, /// Whether to put spaces around chinese characters so they get split pub handle_chinese_chars: bool, /// Whether to strip accents pub strip_accents: Option<bool>, /// Whether to lowercase the input pub lowercase: bool, } impl Default for BertNormalizer { fn default() -> Self { Self { clean_text: true, handle_chinese_chars: true, strip_accents: None, lowercase: true, } } } impl BertNormalizer { pub fn new( clean_text: bool, handle_chinese_chars: bool, strip_accents: Option<bool>, lowercase: bool, ) -> Self { Self { clean_text, handle_chinese_chars, strip_accents, lowercase, } } fn do_clean_text(&self, normalized: &mut NormalizedString) { normalized .filter(|c| !(c as usize == 0 || c as usize == 0xfffd || is_control(c))) .map(|c| if is_whitespace(c) { ' ' } else { c }); } fn do_handle_chinese_chars(&self, normalized: &mut NormalizedString) { let mut new_chars: Vec<(char, isize)> = vec![]; normalized.for_each(|c| { if is_chinese_char(c) { new_chars.extend([(' ', 0), (c, 1), (' ', 1)]); } else { new_chars.push((c, 0)); } }); normalized.transform(new_chars.into_iter(), 0); } fn do_strip_accents(&self, normalized: &mut NormalizedString) { normalized.nfd().filter(|c| !c.is_mark_nonspacing()); } fn do_lowercase(&self, normalized: &mut NormalizedString) { normalized.lowercase(); } } impl Normalizer for BertNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { if self.clean_text { self.do_clean_text(normalized); } if self.handle_chinese_chars { self.do_handle_chinese_chars(normalized); } let strip_accents = self.strip_accents.unwrap_or(self.lowercase); if strip_accents { self.do_strip_accents(normalized); } if self.lowercase { self.do_lowercase(normalized); } Ok(()) } }
0
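A sketch of the full `BertNormalizer` pipeline (my own example; the asserted output is what the four steps above should produce, it is not a test copied from the repo). Cleaning remaps control and whitespace characters, each CJK character is padded with spaces so a whitespace pre-tokenizer will split it, accents are stripped whenever `strip_accents` is unset and `lowercase` is true, and lowercasing runs last:

use tokenizers::normalizers::BertNormalizer;
use tokenizers::{NormalizedString, Normalizer};

fn main() -> tokenizers::Result<()> {
    // clean_text, handle_chinese_chars, strip_accents (None => follow lowercase), lowercase
    let bert = BertNormalizer::new(true, true, None, true);

    let mut n = NormalizedString::from("Héllo你好");
    bert.normalize(&mut n)?;
    // Accent stripped, each CJK character surrounded by spaces, then lowercased.
    assert_eq!(n.get(), "hello 你  好 ");
    Ok(())
}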
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/models/mod.rs
//! Popular tokenizer models. pub mod bpe; pub mod unigram; pub mod wordlevel; pub mod wordpiece; use std::collections::HashMap; use std::path::{Path, PathBuf}; use serde::{Deserialize, Serialize, Serializer}; use crate::models::bpe::{BpeTrainer, BPE}; use crate::models::unigram::{Unigram, UnigramTrainer}; use crate::models::wordlevel::{WordLevel, WordLevelTrainer}; use crate::models::wordpiece::{WordPiece, WordPieceTrainer}; use crate::{AddedToken, Model, Result, Token, Trainer}; /// Wraps a vocab mapping (ID -> token) to a struct that will be serialized in order /// of token ID, smallest to largest. struct OrderedVocabIter<'a> { vocab_r: &'a HashMap<u32, String>, } impl<'a> OrderedVocabIter<'a> { fn new(vocab_r: &'a HashMap<u32, String>) -> Self { Self { vocab_r } } } impl<'a> Serialize for OrderedVocabIter<'a> { fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { // There could be holes so max + 1 is more correct than vocab_r.len() let mut holes = vec![]; let result = if let Some(max) = self.vocab_r.iter().map(|(key, _)| key).max() { let iter = (0..*max + 1).filter_map(|i| { if let Some(token) = self.vocab_r.get(&i){ Some((token, i)) }else{ holes.push(i); None } }); serializer.collect_map(iter) } else { serializer.collect_map(std::iter::empty::<(&str, u32)>()) }; if !holes.is_empty(){ warn!("The OrderedVocab you are attempting to save contains holes for indices {:?}, your vocabulary could be corrupted !", holes); println!("The OrderedVocab you are attempting to save contains holes for indices {:?}, your vocabulary could be corrupted !", holes); } result } } #[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] #[serde(untagged)] pub enum ModelWrapper { BPE(BPE), // WordPiece must stay before WordLevel here for deserialization (for retrocompatibility // with the versions not including the "type"), since WordLevel is a subset of WordPiece WordPiece(WordPiece), WordLevel(WordLevel), Unigram(Unigram), } impl_enum_from!(WordLevel, ModelWrapper, WordLevel); impl_enum_from!(WordPiece, ModelWrapper, WordPiece); impl_enum_from!(BPE, ModelWrapper, BPE); impl_enum_from!(Unigram, ModelWrapper, Unigram); impl Model for ModelWrapper { type Trainer = TrainerWrapper; fn tokenize(&self, tokens: &str) -> Result<Vec<Token>> { match self { Self::WordLevel(t) => t.tokenize(tokens), Self::WordPiece(t) => t.tokenize(tokens), Self::BPE(t) => t.tokenize(tokens), Self::Unigram(t) => t.tokenize(tokens), } } fn token_to_id(&self, token: &str) -> Option<u32> { match self { Self::WordLevel(t) => t.token_to_id(token), Self::WordPiece(t) => t.token_to_id(token), Self::BPE(t) => t.token_to_id(token), Self::Unigram(t) => t.token_to_id(token), } } fn id_to_token(&self, id: u32) -> Option<String> { match self { Self::WordLevel(t) => t.id_to_token(id), Self::WordPiece(t) => t.id_to_token(id), Self::BPE(t) => t.id_to_token(id), Self::Unigram(t) => t.id_to_token(id), } } fn get_vocab(&self) -> HashMap<String, u32> { match self { Self::WordLevel(t) => t.get_vocab(), Self::WordPiece(t) => t.get_vocab(), Self::BPE(t) => t.get_vocab(), Self::Unigram(t) => t.get_vocab(), } } fn get_vocab_size(&self) -> usize { match self { Self::WordLevel(t) => t.get_vocab_size(), Self::WordPiece(t) => t.get_vocab_size(), Self::BPE(t) => t.get_vocab_size(), Self::Unigram(t) => t.get_vocab_size(), } } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { match self { Self::WordLevel(t) => t.save(folder, name), Self::WordPiece(t) => t.save(folder, name), Self::BPE(t) => 
t.save(folder, name), Self::Unigram(t) => t.save(folder, name), } } fn get_trainer(&self) -> Self::Trainer { match self { Self::WordLevel(t) => t.get_trainer().into(), Self::WordPiece(t) => t.get_trainer().into(), Self::BPE(t) => t.get_trainer().into(), Self::Unigram(t) => t.get_trainer().into(), } } } #[derive(Serialize, Deserialize)] pub enum TrainerWrapper { BpeTrainer(BpeTrainer), WordPieceTrainer(WordPieceTrainer), WordLevelTrainer(WordLevelTrainer), UnigramTrainer(UnigramTrainer), } impl Trainer for TrainerWrapper { type Model = ModelWrapper; fn should_show_progress(&self) -> bool { match self { Self::BpeTrainer(bpe) => bpe.should_show_progress(), Self::WordPieceTrainer(wpt) => wpt.should_show_progress(), Self::WordLevelTrainer(wpt) => wpt.should_show_progress(), Self::UnigramTrainer(wpt) => wpt.should_show_progress(), } } fn train(&self, model: &mut ModelWrapper) -> Result<Vec<AddedToken>> { match self { Self::BpeTrainer(t) => match model { ModelWrapper::BPE(bpe) => t.train(bpe), _ => Err("BpeTrainer can only train a BPE".into()), }, Self::WordPieceTrainer(t) => match model { ModelWrapper::WordPiece(wp) => t.train(wp), _ => Err("WordPieceTrainer can only train a WordPiece".into()), }, Self::WordLevelTrainer(t) => match model { ModelWrapper::WordLevel(wl) => t.train(wl), _ => Err("WordLevelTrainer can only train a WordLevel".into()), }, Self::UnigramTrainer(t) => match model { ModelWrapper::Unigram(u) => t.train(u), _ => Err("UnigramTrainer can only train a Unigram".into()), }, } } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { match self { Self::BpeTrainer(bpe) => bpe.feed(iterator, process), Self::WordPieceTrainer(wpt) => wpt.feed(iterator, process), Self::WordLevelTrainer(wpt) => wpt.feed(iterator, process), Self::UnigramTrainer(wpt) => wpt.feed(iterator, process), } } } impl_enum_from!(BpeTrainer, TrainerWrapper, BpeTrainer); impl_enum_from!(WordPieceTrainer, TrainerWrapper, WordPieceTrainer); impl_enum_from!(UnigramTrainer, TrainerWrapper, UnigramTrainer); impl_enum_from!(WordLevelTrainer, TrainerWrapper, WordLevelTrainer); #[cfg(test)] mod tests { use super::*; #[test] fn trainer_wrapper_train_model_wrapper() { let trainer = TrainerWrapper::BpeTrainer(BpeTrainer::default()); let mut model = ModelWrapper::Unigram(Unigram::default()); let result = trainer.train(&mut model); assert!(result.is_err()); } #[test] fn incomplete_ordered_vocab() { let vocab_r: HashMap<u32, String> = HashMap::from([(0, "Hi".to_string()), (2, "There".to_string())]); let ordered = OrderedVocabIter::new(&vocab_r); let serialized = serde_json::to_string(&ordered).unwrap(); assert_eq!(serialized, "{\"Hi\":0,\"There\":2}"); } }
0
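A dispatch sketch (mine, not part of the sources): `ModelWrapper` is the enum the rest of the crate can hold when the concrete model type is not fixed, and the `impl_enum_from!` conversions plus the hand-written `Model` impl on the enum mean callers never need to know which model is inside:

use std::collections::HashMap;
use tokenizers::models::bpe::BPE;
use tokenizers::models::ModelWrapper;
use tokenizers::Model;

fn main() -> tokenizers::Result<()> {
    let vocab: HashMap<String, u32> =
        [("a".to_string(), 0), ("b".to_string(), 1)].into_iter().collect();
    let bpe = BPE::new(vocab, vec![]);

    // `From<BPE> for ModelWrapper` is generated by impl_enum_from!.
    let model: ModelWrapper = bpe.into();
    assert_eq!(model.token_to_id("a"), Some(0));
    assert_eq!(model.id_to_token(1).as_deref(), Some("b"));
    assert_eq!(model.get_vocab_size(), 2);

    // With no merges, "ab" tokenizes into its two single-character tokens.
    let tokens = model.tokenize("ab")?;
    assert_eq!(tokens.len(), 2);
    Ok(())
}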
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/bpe/mod.rs
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model. use std::{iter, mem}; mod model; mod serialization; pub mod trainer; mod word; type Pair = (u32, u32); /// Errors that can be encountered while using or constructing a `BPE` model. #[derive(thiserror::Error, Debug)] pub enum Error { /// An error encountered while reading files mainly. #[error("IoError: {0}")] Io(#[from] std::io::Error), /// An error forwarded from Serde, while parsing JSON #[error("JsonError: {0}")] JsonError(#[from] serde_json::Error), /// When the vocab.json file is in the wrong format #[error("Bad vocabulary json file")] BadVocabulary, /// When the merges.txt file is in the wrong format. This error holds the line /// number of the line that caused the error. #[error("Merges text file invalid at line {0}")] BadMerges(usize), /// If a token found in merges, is not in the vocab #[error("Token `{0}` out of vocabulary")] MergeTokenOutOfVocabulary(String), /// If the provided unk token is out of vocabulary #[error("Unk token `{0}` not found in the vocabulary")] UnkTokenOutOfVocabulary(String), /// Dropout not between 0 and 1. #[error("Dropout should be between 0 and 1")] InvalidDropout, } /// Provides access to the `FirstLastIterator` to any Iterator pub(crate) trait WithFirstLastIterator: Iterator + Sized { fn with_first_and_last(self) -> FirstLastIterator<Self>; } impl<I> WithFirstLastIterator for I where I: Iterator, { fn with_first_and_last(self) -> FirstLastIterator<Self> { FirstLastIterator { first: true, iter: self.peekable(), } } } /// Provides information about whether an item is the first and/or the last of the iterator pub(crate) struct FirstLastIterator<I> where I: Iterator, { first: bool, iter: iter::Peekable<I>, } impl<I> Iterator for FirstLastIterator<I> where I: Iterator, { /// (is_first, is_last, item) type Item = (bool, bool, I::Item); fn next(&mut self) -> Option<Self::Item> { let first = mem::replace(&mut self.first, false); self.iter .next() .map(|e| (first, self.iter.peek().is_none(), e)) } } // Re-export pub use model::*; pub use trainer::*; use word::*;
0
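A small sketch (assumed, not from the dump) of how these error variants surface to callers: the crate's `Result` boxes errors as `dyn std::error::Error`, so inspecting a specific `bpe::Error` goes through `downcast_ref`, exactly as the model tests further down do:

use tokenizers::models::bpe::{BpeBuilder, Error};

fn main() {
    // Dropout outside (0, 1] is rejected at build time with Error::InvalidDropout.
    match BpeBuilder::new().dropout(1.5).build() {
        Err(e) => assert!(matches!(e.downcast_ref::<Error>(), Some(Error::InvalidDropout))),
        Ok(_) => panic!("dropout of 1.5 should not build"),
    }
}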
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/bpe/model.rs
use super::{super::OrderedVocabIter, trainer::BpeTrainer, Error, Pair, Word}; use crate::tokenizer::{Model, Result, Token}; use crate::utils::cache::{Cache, DEFAULT_CACHE_CAPACITY}; use crate::utils::iter::ResultShunt; use serde_json::Value; use std::borrow::Cow; use std::{ collections::HashMap, fs::File, io::prelude::*, io::{BufRead, BufReader}, path::{Path, PathBuf}, }; pub type Vocab = HashMap<String, u32>; type VocabR = HashMap<u32, String>; pub type MergeMap = HashMap<Pair, (u32, u32)>; pub type Merges = Vec<(String, String)>; struct Config { files: Option<(String, String)>, vocab: Vocab, merges: Merges, cache_capacity: usize, dropout: Option<f32>, unk_token: Option<String>, continuing_subword_prefix: Option<String>, end_of_word_suffix: Option<String>, fuse_unk: bool, byte_fallback: bool, } /// A `BpeBuilder` can be used to create a `BPE` model with a custom configuration. pub struct BpeBuilder { config: Config, } impl Default for BpeBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: HashMap::new(), merges: vec![], cache_capacity: DEFAULT_CACHE_CAPACITY, dropout: None, unk_token: None, continuing_subword_prefix: None, end_of_word_suffix: None, fuse_unk: false, byte_fallback: false, }, } } } impl BpeBuilder { /// Constructs a new `BpeBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String, merges: String) -> Self { self.config.files = Some((vocab, merges)); self } /// Set the vocab (token -> ID) and merges mappings. #[must_use] pub fn vocab_and_merges(mut self, vocab: Vocab, merges: Merges) -> Self { self.config.vocab = vocab; self.config.merges = merges; self } /// Set the cache's capacity. Set to 0 if you want to disable caching. #[must_use] pub fn cache_capacity(mut self, capacity: usize) -> Self { self.config.cache_capacity = capacity; self } /// Use [dropout](https://arxiv.org/abs/1910.13267) with the model. #[must_use] pub fn dropout(mut self, dropout: f32) -> Self { self.config.dropout = Some(dropout); self } /// Set the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = Some(unk_token); self } /// Set the `continuing_subword_prefix` option. #[must_use] pub fn continuing_subword_prefix(mut self, prefix: String) -> Self { self.config.continuing_subword_prefix = Some(prefix); self } /// Set the `end_of_word_suffix` option. #[must_use] pub fn end_of_word_suffix(mut self, prefix: String) -> Self { self.config.end_of_word_suffix = Some(prefix); self } /// Set the `fuse_unk` option. #[must_use] pub fn fuse_unk(mut self, fuse_unk: bool) -> Self { self.config.fuse_unk = fuse_unk; self } /// Set the `fuse_unk` option. #[must_use] pub fn byte_fallback(mut self, byte_fallback: bool) -> Self { self.config.byte_fallback = byte_fallback; self } /// Returns a `BPE` model that uses the `BpeBuilder`'s configuration. pub fn build(mut self) -> Result<BPE> { // Validate dropout. 
if let Some(p) = self.config.dropout { if p <= 0.0 || p > 1.0 { return Err(Error::InvalidDropout.into()); } } // Read files if necessary if let Some((vocab, merges)) = self.config.files { let (v, m) = BPE::read_file(&vocab, &merges)?; self.config.vocab = v; self.config.merges = m; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); let cache = match self.config.cache_capacity { 0 => None, capacity => Some(Cache::new(capacity)), }; let vocab = self.config.vocab; let prefix_len = if let Some(prefix) = &self.config.continuing_subword_prefix { prefix.len() } else { 0 }; let merge_map: MergeMap = self .config .merges .into_iter() .enumerate() .map(|(i, (a, b))| -> Result<(Pair, (u32, u32))> { let a_id = vocab .get(&a) .ok_or_else(|| Error::MergeTokenOutOfVocabulary(a.to_owned()))?; let b_id = vocab .get(&b) .ok_or_else(|| Error::MergeTokenOutOfVocabulary(b.to_owned()))?; let new_token = format!("{}{}", a, &b[prefix_len..]); let new_id = vocab .get(&new_token) .ok_or(Error::MergeTokenOutOfVocabulary(new_token))?; Ok(((*a_id, *b_id), (i as u32, *new_id))) }) .collect::<Result<MergeMap>>()?; // merges.insert(pair, (rank as u32, *new_id)); Ok(BPE { vocab, vocab_r, merges: merge_map, cache, dropout: self.config.dropout, unk_token: self.config.unk_token, continuing_subword_prefix: self.config.continuing_subword_prefix, end_of_word_suffix: self.config.end_of_word_suffix, fuse_unk: self.config.fuse_unk, byte_fallback: self.config.byte_fallback, }) } } /// A [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model. #[derive(PartialEq)] pub struct BPE { /// The vocabulary assigns a number to each token. pub(crate) vocab: Vocab, /// Reversed vocabulary, to rebuild sentences. pub(crate) vocab_r: VocabR, /// Contains the mapping between Pairs and their (rank, new_id). pub(crate) merges: MergeMap, /// Contains the cache for optimizing the encoding step. cache: Option<Cache<String, Word>>, /// Dropout probability for merges. 0 = no dropout is the default. At 1.0, tokenization will /// perform no merges, so the result will just be characters. pub dropout: Option<f32>, /// The unknown token to be used when we encounter an unknown char pub unk_token: Option<String>, /// An optional prefix to use on any subword that exist only behind another one pub continuing_subword_prefix: Option<String>, /// An optional suffix to caracterize and end-of-word subword pub end_of_word_suffix: Option<String>, /// Do multiple unk tokens get fused pub fuse_unk: bool, /// Byte fallback from sentence pieces, instead of UNK, uses `"<0x00>"` /// for each byte in the unk token pub byte_fallback: bool, } impl std::fmt::Debug for BPE { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("BPE") .field("dropout", &self.dropout) .field("unk_token", &self.unk_token) .field("continuing_subword_prefix", &self.continuing_subword_prefix) .field("end_of_word_suffix", &self.end_of_word_suffix) .field("fuse_unk", &self.fuse_unk) .field("byte_fallback", &self.byte_fallback) .field("vocab", &self.vocab.len()) .field("merges", &self.merges.len()) .finish() } } impl Default for BPE { fn default() -> Self { Self::builder().build().unwrap() } } impl Clone for BPE { // `Clone` can't be derive because it's not implemented for `Cache`. // To keep things simple when we clone, the new BPE will start with a fresh cache. 
fn clone(&self) -> Self { let fresh_cache = self.cache.as_ref().map(|cache| cache.fresh()); Self { vocab: self.vocab.clone(), vocab_r: self.vocab_r.clone(), merges: self.merges.clone(), cache: fresh_cache, dropout: self.dropout, unk_token: self.unk_token.clone(), continuing_subword_prefix: self.continuing_subword_prefix.clone(), end_of_word_suffix: self.end_of_word_suffix.clone(), fuse_unk: self.fuse_unk, byte_fallback: self.byte_fallback, } } } /// Converts the merges strings (for example from `merges.txt` file) with the format /// "{pair_a} {pair_b}" into the format expected by the BPE struct pub(crate) fn convert_merges_to_hashmap<I: Iterator<Item = String>>( iter: I, _vocab: &Vocab, ) -> Result<Merges> { let mut merges = vec![]; let lines = iter.filter(|l| !l.starts_with("#version")); for (rank, line) in lines.enumerate() { let parts = line.split(' ').collect::<Vec<_>>(); if parts.len() != 2 { return Err(Error::BadMerges(rank + 1).into()); } merges.push((parts[0].to_string(), parts[1].to_string())); } Ok(merges) } impl BPE { /// Initialize a `BpeBuilder`. pub fn builder() -> BpeBuilder { BpeBuilder::new() } /// Create a new BPE model with the given vocab and merges. pub fn new(vocab: Vocab, merges: Merges) -> Self { Self::builder() .vocab_and_merges(vocab, merges) .build() .unwrap() } /// Initialize a BpeBuilder model from vocab and merges files pub fn from_file(vocab: &str, merges: &str) -> BpeBuilder { Self::builder().files(vocab.to_owned(), merges.to_owned()) } /// Read the given files to extract the vocab and merges pub fn read_file(vocab: &str, merges: &str) -> Result<(Vocab, Merges)> { // Read vocab.json let vocab_file = File::open(vocab)?; let mut vocab_file = BufReader::new(vocab_file); let mut buffer = String::new(); vocab_file.read_to_string(&mut buffer)?; let json: Value = serde_json::from_str(&buffer)?; let mut vocab = HashMap::new(); match json { Value::Object(m) => { for (token, id) in m { if let Value::Number(id) = id { let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32; vocab.insert(token, id); } } } _ => return Err(Box::new(Error::BadVocabulary)), }; // Read merges file let merge_file = File::open(merges)?; let merge_file = BufReader::new(merge_file); let merges = ResultShunt::process(merge_file.lines(), |iter| { convert_merges_to_hashmap(iter, &vocab) })??; Ok((vocab, merges)) } /// Reset the cache. 
pub fn clear_cache(&self) { if let Some(ref cache) = self.cache { cache.clear() } } pub fn get_vocab(&self) -> Vocab { self.vocab.clone() } pub fn get_unk_token(&self) -> &Option<String> { &self.unk_token } pub fn get_continuing_subword_prefix(&self) -> &Option<String> { &self.continuing_subword_prefix } fn merge_word(&self, w: &str) -> Result<Word> { let mut indices = w.char_indices().map(|(idx, _)| idx).peekable(); let mut word = Word::with_capacity(w.len()); let mut unk: Option<(u32, usize)> = None; while let Some(i) = indices.next() { let end = indices.peek(); let is_first = i == 0; let is_last = end.is_none(); let mut s = if let Some(e) = end { Cow::Borrowed(&w[i..*e]) } else { Cow::Borrowed(&w[i..]) }; let byte_len = s.len(); // Add the `continuing_subword_prefix` if relevant if !is_first { if let Some(ref prefix) = self.continuing_subword_prefix { s = format!("{}{}", prefix, s).into() } } // Add the `end_of_word_suffix` if relevant if is_last { if let Some(ref suffix) = self.end_of_word_suffix { s = format!("{}{}", s, suffix).into() } } if let Some(id) = self.vocab.get(s.as_ref()) { if let Some((unk_id, unk_len)) = unk { word.add(unk_id, unk_len); unk = None; } word.add(*id, byte_len); } else { if self.byte_fallback { let tokens: Option<Vec<_>> = s .bytes() .map(|b| -> Option<&u32> { let code = format!("<{:#04X}>", b); self.vocab.get(&code) }) .collect(); if let Some(tokens) = tokens { for t in tokens { word.add(*t, 1); } continue; } } if let Some(unk_token) = &self.unk_token { unk = match (unk, self.fuse_unk) { (Some((unk_id, unk_len)), true) => { // Fuse unk Some((unk_id, unk_len + byte_len)) } (Some((unk_id, unk_len)), false) => { // Do not fuse unk, add the previous one word.add(unk_id, unk_len); Some(( *self.vocab.get(unk_token).ok_or_else(|| { Error::UnkTokenOutOfVocabulary(unk_token.to_owned()) })?, byte_len, )) } _ => Some(( *self.vocab.get(unk_token).ok_or_else(|| { Error::UnkTokenOutOfVocabulary(unk_token.to_owned()) })?, byte_len, )), }; } } } if let Some((unk_id, unk_len)) = unk { word.add(unk_id, unk_len); } word.merge_all(&self.merges, self.dropout); Ok(word) } fn word_to_tokens<'a, 'b: 'a>(&'a self, word: &'b Word) -> impl Iterator<Item = Token> + 'a { word.get_chars_iter() .zip(word.get_offsets_iter()) .map(move |(id, offsets)| Token::new(id, self.vocab_r[&id].clone(), offsets)) } fn tokenize_with_cache(&self, sequence: &str) -> Result<Vec<Token>> { if let Some(ref hit) = self.cache.as_ref().and_then(|c| c.get(sequence)) { Ok(self.word_to_tokens(hit).collect()) } else { let word = self.merge_word(sequence)?; let ret = self.word_to_tokens(&word).collect(); if let Some(ref cache) = self.cache { cache.set(sequence.to_owned(), word); } Ok(ret) } } } impl Model for BPE { type Trainer = BpeTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> { if sequence.is_empty() { return Ok(vec![]); } if self.dropout.is_none() { self.tokenize_with_cache(sequence) } else { let word = self.merge_word(sequence)?; Ok(self.word_to_tokens(&word).collect()) } } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{}-vocab.json", name), None => "vocab.json".to_string(), }; // Write vocab.json let vocab_path: PathBuf 
= [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r); let serialized = serde_json::to_string(&order_vocab_iter)?; vocab_file.write_all(serialized.as_bytes())?; // Write merges.txt let merges_file_name = match name { Some(name) => format!("{}-merges.txt", name), None => "merges.txt".to_string(), }; let merges_path: PathBuf = [folder, Path::new(merges_file_name.as_str())] .iter() .collect(); let mut merges_file = File::create(&merges_path)?; let mut merges: Vec<(&Pair, &u32)> = self .merges .iter() .map(|(pair, (rank, _))| (pair, rank)) .collect(); merges.sort_unstable_by_key(|k| *k.1); merges_file.write_all(b"#version: 0.2\n")?; merges_file.write_all( &merges .into_iter() .flat_map(|(pair, _)| { format!("{} {}\n", self.vocab_r[&pair.0], self.vocab_r[&pair.1]).into_bytes() }) .collect::<Vec<_>>()[..], )?; Ok(vec![vocab_path, merges_path]) } fn get_trainer(&self) -> BpeTrainer { BpeTrainer::default() } } #[cfg(test)] mod tests { use super::*; use tempfile::NamedTempFile; #[test] fn test_ordered_vocab_iter() { let vocab_r: VocabR = [ (0, "a".into()), (1, "b".into()), (2, "c".into()), (3, "ab".into()), ] .iter() .cloned() .collect(); let order_vocab_iter = OrderedVocabIter::new(&vocab_r); let serialized = serde_json::to_string(&order_vocab_iter).unwrap(); assert_eq!(serialized, "{\"a\":0,\"b\":1,\"c\":2,\"ab\":3}"); } #[test] fn test_unk_not_fused() { let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .build() .unwrap(); let tokens = bpe.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = bpe.tokenize("cc").unwrap(); assert_eq!( tokens, vec![ Token::new(0u32, "<unk>".into(), (0, 1)), Token::new(0u32, "<unk>".into(), (1, 2)), ] ); let tokens = bpe.tokenize("accb").unwrap(); assert_eq!( tokens, vec![ Token::new(1u32, "a".into(), (0, 1)), Token::new(0u32, "<unk>".into(), (1, 2)), Token::new(0u32, "<unk>".into(), (2, 3)), Token::new(2u32, "b".into(), (3, 4)), ] ); } #[test] fn test_unk_get_fused() { let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .fuse_unk(true) .build() .unwrap(); let tokens = bpe.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = bpe.tokenize("cc").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 2)),]); let tokens = bpe.tokenize("accb").unwrap(); assert_eq!( tokens, vec![ Token::new(1u32, "a".into(), (0, 1)), Token::new(0u32, "<unk>".into(), (1, 3)), Token::new(2u32, "b".into(), (3, 4)), ] ); } #[test] // Test tokenization. With dropout set to 0 tokenization is deterministic, // so we know exactly what the result should be. // // To test this, we'll build a simple model to tokenize the word 'unrelated'. 
fn test_tokenize_with_and_without_dropout() { let vocab: Vocab = [ ("u".into(), 0), ("n".into(), 1), ("r".into(), 2), ("e".into(), 3), ("l".into(), 4), ("a".into(), 5), ("t".into(), 6), ("d".into(), 7), ("re".into(), 8), ("at".into(), 9), ("ed".into(), 10), ("un".into(), 11), ("ated".into(), 12), ("rel".into(), 13), ("related".into(), 14), ("unrelated".into(), 15), ] .iter() .cloned() .collect(); let merges: Merges = vec![ ("r".to_string(), "e".to_string()), ("a".to_string(), "t".to_string()), ("e".to_string(), "d".to_string()), ("u".to_string(), "n".to_string()), ("at".to_string(), "ed".to_string()), ("re".to_string(), "l".to_string()), ("rel".to_string(), "ated".to_string()), ("un".to_string(), "related".to_string()), ]; let mut bpe = BPE::new(vocab, merges); // With no dropout: let tokens = bpe.tokenize("unrelated").unwrap(); assert_eq!(tokens, vec![Token::new(15u32, "unrelated".into(), (0, 9))]); // Now set dropout to 1.0. Result should be no merges performed. bpe.dropout = Some(1.0); let tokens = bpe.tokenize("unrelated").unwrap(); assert_eq!( tokens, vec![ Token::new(0u32, "u".into(), (0, 1)), Token::new(1u32, "n".into(), (1, 2)), Token::new(2u32, "r".into(), (2, 3)), Token::new(3u32, "e".into(), (3, 4)), Token::new(4u32, "l".into(), (4, 5)), Token::new(5u32, "a".into(), (5, 6)), Token::new(6u32, "t".into(), (6, 7)), Token::new(3u32, "e".into(), (7, 8)), Token::new(7u32, "d".into(), (8, 9)), ] ); // Now try with dropout between 0 and 1. bpe.dropout = Some(0.5); let tokens = bpe.tokenize("unrelated").unwrap(); assert!(!tokens.is_empty() && tokens.len() <= 9); } #[test] // Ensure `BPE::from_file` works as expected. fn test_bpe_from_file() { // Set up vocab file. let mut vocab_file = NamedTempFile::new().unwrap(); vocab_file .write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}") .unwrap(); // Set up merges file. let mut merges_file = NamedTempFile::new().unwrap(); merges_file.write_all(b"#version: 0.2\na b").unwrap(); // Make sure we can instantiate a BPE model from the files. let builder = BPE::from_file( vocab_file.path().to_str().unwrap(), merges_file.path().to_str().unwrap(), ); let bpe = builder.build().unwrap(); // Check merges. assert_eq!(bpe.merges.get(&(0, 1)).unwrap(), &(0u32, 3u32)); // Check vocab. assert_eq!(bpe.vocab.get("a").unwrap(), &0u32); assert_eq!(bpe.vocab.get("b").unwrap(), &1u32); assert_eq!(bpe.vocab.get("c").unwrap(), &2u32); assert_eq!(bpe.vocab.get("ab").unwrap(), &3u32); } #[test] // Ensure `BPE::from_file` works as expected. fn test_bpe_with_continuing_subword_prefix() { let vocab: Vocab = vec![ ("a".to_string(), 0), ("##b".to_string(), 1), ("##c".to_string(), 2), ("ab".to_string(), 3), ("abc".to_string(), 4), ] .into_iter() .collect(); let merges = vec![ ("a".to_string(), "##b".to_string()), ("ab".to_string(), "##c".to_string()), ]; let bpe = BPE::builder() .vocab_and_merges(vocab, merges) .unk_token("[UNK]".to_string()) .continuing_subword_prefix("##".to_string()) .build() .unwrap(); let res = bpe.tokenize("ab"); assert_eq!( res.unwrap(), vec![Token { id: 3, value: "ab".to_string(), offsets: (0, 2) }] ); let res = bpe.tokenize("abc"); assert_eq!( res.unwrap(), vec![Token { id: 4, value: "abc".to_string(), offsets: (0, 3) }] ); } #[test] // Ensure `MergeTokenOutOfVocabulary` error is returned when it should be. fn test_bpe_from_file_merge_token_oov() { // Set up vocab file. let mut vocab_file = NamedTempFile::new().unwrap(); vocab_file .write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}") .unwrap(); // Set up merges file. 
let mut merges_file = NamedTempFile::new().unwrap(); merges_file.write_all(b"#version: 0.2\na b\na d").unwrap(); // Ensure the result of BPE::from_file is a MergeTokenOutOfVocabulary error. match BPE::from_file( vocab_file.path().to_str().unwrap(), merges_file.path().to_str().unwrap(), ) .build() { Ok(_) => unreachable!(), Err(err) => match err.downcast_ref::<Error>() { Some(Error::MergeTokenOutOfVocabulary(token)) => { assert_eq!(*token, String::from("d")) } _ => unreachable!(), }, } } #[test] // Ensure `BadMerges` error is returned when there is an invalid line in the // merges.txt file. fn test_bpe_from_file_bad_merges() { // Set up vocab file. let mut vocab_file = NamedTempFile::new().unwrap(); vocab_file .write_all("{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}".as_bytes()) .unwrap(); // Set up merges file with a bad line. let mut merges_file = NamedTempFile::new().unwrap(); merges_file.write_all(b"#version: 0.2\na b\nc").unwrap(); // Ensure the result of BPE::from_file is a BadMerges error. match BPE::from_file( vocab_file.path().to_str().unwrap(), merges_file.path().to_str().unwrap(), ) .build() { Ok(_) => unreachable!(), Err(err) => match err.downcast_ref::<Error>() { Some(Error::BadMerges(line)) => assert_eq!(*line, 2), _ => unreachable!(), }, } } #[test] fn test_bpe_byte_fallback() { // 0x61 == 'a' in bytes let vocab: Vocab = [("<unk>".into(), 0), ("<0x61>".into(), 1)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .byte_fallback(true) .build() .unwrap(); let tokens = bpe.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = bpe.tokenize("a").unwrap(); assert_eq!(tokens, vec![Token::new(1u32, "<0x61>".into(), (0, 1)),]); } #[test] fn test_bpe_byte_fallback_newline() { // 0x0A == '\n' in bytes let vocab: Vocab = [("<unk>".into(), 0), ("<0x0A>".into(), 1)] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![]) .unk_token("<unk>".to_string()) .byte_fallback(true) .build() .unwrap(); let tokens = bpe.tokenize("\n").unwrap(); assert_eq!(tokens, vec![Token::new(1u32, "<0x0A>".into(), (0, 1)),]); } }
0
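A builder sketch (my own, using only the APIs visible above): a three-entry vocab with one merge, checked end to end through `Model::tokenize`:

use std::collections::HashMap;
use tokenizers::models::bpe::BPE;
use tokenizers::Model;

fn main() -> tokenizers::Result<()> {
    let vocab: HashMap<String, u32> = [
        ("h".to_string(), 0),
        ("i".to_string(), 1),
        ("hi".to_string(), 2),
    ]
    .into_iter()
    .collect();
    let merges = vec![("h".to_string(), "i".to_string())];

    let bpe = BPE::builder().vocab_and_merges(vocab, merges).build()?;

    // The (h, i) merge applies, so "hi" comes back as the single merged token.
    let tokens = bpe.tokenize("hi")?;
    assert_eq!(tokens.len(), 1);
    assert_eq!(tokens[0].value, "hi");
    assert_eq!(tokens[0].id, 2);
    Ok(())
}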
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/bpe/serialization.rs
use super::{super::OrderedVocabIter, convert_merges_to_hashmap, BpeBuilder, Pair, BPE}; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use std::collections::HashMap; impl Serialize for BPE { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("BPE", 8)?; // Start by small fields model.serialize_field("type", "BPE")?; model.serialize_field("dropout", &self.dropout)?; model.serialize_field("unk_token", &self.unk_token)?; model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?; model.serialize_field("end_of_word_suffix", &self.end_of_word_suffix)?; model.serialize_field("fuse_unk", &self.fuse_unk)?; model.serialize_field("byte_fallback", &self.byte_fallback)?; // Then the large ones let mut merges: Vec<(&Pair, &u32)> = self .merges .iter() .map(|(pair, (rank, _))| (pair, rank)) .collect(); merges.sort_unstable_by_key(|k| *k.1); let merges_str = merges .into_iter() .map(|(pair, _)| format!("{} {}", self.vocab_r[&pair.0], self.vocab_r[&pair.1])) .collect::<Vec<_>>(); let ordered_vocab = OrderedVocabIter::new(&self.vocab_r); model.serialize_field("vocab", &ordered_vocab)?; model.serialize_field("merges", &merges_str)?; model.end() } } impl<'de> Deserialize<'de> for BPE { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "BPE", &[ "type", "dropout", "unk_token", "continuing_subword_prefix", "end_of_word_suffix", "fuse_unk", "byte_fallback", "vocab", "merges", ], BPEVisitor, ) } } struct BPEVisitor; impl<'de> Visitor<'de> for BPEVisitor { type Value = BPE; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct BPE") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut builder = BpeBuilder::new(); let mut vocab: Option<HashMap<String, u32>> = None; let mut merges: Option<Vec<String>> = None; while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "dropout" => { if let Some(dropout) = map.next_value()? { builder = builder.dropout(dropout); } } "unk_token" => { if let Some(unk) = map.next_value()? { builder = builder.unk_token(unk); } } "continuing_subword_prefix" => { if let Some(prefix) = map.next_value()? { builder = builder.continuing_subword_prefix(prefix); } } "end_of_word_suffix" => { if let Some(suffix) = map.next_value()? { builder = builder.end_of_word_suffix(suffix); } } "fuse_unk" => { if let Some(suffix) = map.next_value()? { builder = builder.fuse_unk(suffix); } } "byte_fallback" => { if let Some(suffix) = map.next_value()? { builder = builder.byte_fallback(suffix); } } "vocab" => vocab = Some(map.next_value()?), "merges" => merges = Some(map.next_value()?), "type" => match map.next_value()? { "BPE" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"BPE", )) } }, _ => {} } } if let (Some(vocab), Some(merges)) = (vocab, merges) { let merges = convert_merges_to_hashmap(merges.into_iter(), &vocab).map_err(Error::custom)?; builder = builder.vocab_and_merges(vocab, merges); Ok(builder.build().map_err(Error::custom)?) } else { Err(Error::custom("Missing vocab/merges")) } } }
0
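A round-trip sketch (mine; it assumes `serde_json` is available as a dependency, as it is for the crate's own tests): the custom `Serialize` impl writes the scalar options first, then the vocab in id order via `OrderedVocabIter` and the merges as `"{a} {b}"` strings, while `Deserialize` rebuilds the model through `BpeBuilder`:

use std::collections::HashMap;
use tokenizers::models::bpe::BPE;

fn main() -> tokenizers::Result<()> {
    let vocab: HashMap<String, u32> =
        [("a".to_string(), 0), ("b".to_string(), 1), ("ab".to_string(), 2)]
            .into_iter()
            .collect();
    let bpe = BPE::new(vocab, vec![("a".to_string(), "b".to_string())]);

    let json = serde_json::to_string(&bpe)?;
    assert!(json.contains("\"type\":\"BPE\""));
    assert!(json.contains("\"merges\":[\"a b\"]"));

    let reloaded: BPE = serde_json::from_str(&json)?;
    assert_eq!(reloaded.get_vocab().len(), 3);
    Ok(())
}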
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/bpe/trainer.rs
#![allow(clippy::map_entry)] use super::{Pair, WithFirstLastIterator, Word, BPE}; use crate::parallelism::*; use crate::tokenizer::{AddedToken, Result, Trainer}; use crate::utils::progress::{ProgressBar, ProgressStyle}; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet}; #[derive(Debug, Eq)] struct Merge { pair: Pair, count: u32, pos: HashSet<usize>, } impl PartialEq for Merge { fn eq(&self, other: &Self) -> bool { self.count == other.count && self.pair == other.pair } } impl PartialOrd for Merge { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { if self.count != other.count { Some(self.count.cmp(&other.count)) } else { // Here we want ascending order Some(other.pair.cmp(&self.pair)) } } } impl Ord for Merge { fn cmp(&self, other: &Self) -> Ordering { self.partial_cmp(other).unwrap() } } struct Config { min_frequency: u32, vocab_size: usize, show_progress: bool, special_tokens: Vec<AddedToken>, limit_alphabet: Option<usize>, initial_alphabet: HashSet<char>, continuing_subword_prefix: Option<String>, end_of_word_suffix: Option<String>, max_token_length: Option<usize>, } /// A `BpeTrainerBuilder` can be used to create a `BpeTrainer` with a custom /// configuration. pub struct BpeTrainerBuilder { config: Config, } impl Default for BpeTrainerBuilder { fn default() -> Self { Self { config: Config { min_frequency: 0, vocab_size: 30000, show_progress: true, special_tokens: vec![], limit_alphabet: None, initial_alphabet: HashSet::new(), continuing_subword_prefix: None, end_of_word_suffix: None, max_token_length: None, }, } } } impl BpeTrainerBuilder { /// Constructs a new `BpeTrainerBuilder` pub fn new() -> Self { Self::default() } /// Set the expected minimum frequency #[must_use] pub fn min_frequency(mut self, frequency: u32) -> Self { self.config.min_frequency = frequency; self } /// Set the vocabulary size #[must_use] pub fn vocab_size(mut self, size: usize) -> Self { self.config.vocab_size = size; self } /// Set whether to show progress #[must_use] pub fn show_progress(mut self, show: bool) -> Self { self.config.show_progress = show; self } /// Set the special tokens #[must_use] pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self { self.config.special_tokens = tokens; self } /// Set whether to limit the alphabet #[must_use] pub fn limit_alphabet(mut self, limit: usize) -> Self { self.config.limit_alphabet = Some(limit); self } /// Set the initial alphabet #[must_use] pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self { self.config.initial_alphabet = alphabet; self } /// Set the continuing_subword_prefix #[must_use] pub fn continuing_subword_prefix(mut self, prefix: String) -> Self { self.config.continuing_subword_prefix = Some(prefix); self } /// Set the end_of_word_suffix #[must_use] pub fn end_of_word_suffix(mut self, suffix: String) -> Self { self.config.end_of_word_suffix = Some(suffix); self } /// Set max_token_length #[must_use] pub fn max_token_length(mut self, max_token_length: Option<usize>) -> Self { self.config.max_token_length = max_token_length; self } /// Constructs the final BpeTrainer pub fn build(self) -> BpeTrainer { BpeTrainer { min_frequency: self.config.min_frequency, vocab_size: self.config.vocab_size, show_progress: self.config.show_progress, special_tokens: self.config.special_tokens, limit_alphabet: self.config.limit_alphabet, initial_alphabet: self.config.initial_alphabet, continuing_subword_prefix: self.config.continuing_subword_prefix, end_of_word_suffix: 
self.config.end_of_word_suffix, max_token_length: self.config.max_token_length, words: HashMap::new(), } } } /// In charge of training a `BPE` model /// /// # Examples /// /// ``` /// use tokenizers::tokenizer::Trainer; /// use tokenizers::models::bpe::{BPE, BpeTrainer}; /// /// let sequences = vec![ "Hello", "World" ]; /// /// let mut trainer = BpeTrainer::default(); /// trainer.feed(sequences.iter(), |s| Ok(vec![s.to_owned()])); /// /// let mut model = BPE::default(); /// let special_tokens = trainer.train(&mut model).unwrap(); /// ``` #[non_exhaustive] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub struct BpeTrainer { /// The minimum frequency a pair must have to produce a merge operation pub min_frequency: u32, /// The target vocabulary size pub vocab_size: usize, /// Whether to show progress while training pub show_progress: bool, /// A list of special tokens that the model should know of pub special_tokens: Vec<AddedToken>, /// Whether to limit the number of initial tokens that can be kept before computing merges pub limit_alphabet: Option<usize>, /// The initial alphabet we want absolutely to include. This allows to cover /// some characters that are not necessarily in the training set pub initial_alphabet: HashSet<char>, /// An optional prefix to use on any subword that exist only behind another one pub continuing_subword_prefix: Option<String>, /// An optional suffix to caracterize and end-of-word subword pub end_of_word_suffix: Option<String>, /// An optional parameter to limit the max length of any single token pub max_token_length: Option<usize>, words: HashMap<String, u32>, } impl Default for BpeTrainer { fn default() -> Self { Self::builder().build() } } impl BpeTrainer { pub fn new(min_frequency: u32, vocab_size: usize) -> Self { Self { min_frequency, vocab_size, ..Default::default() } } pub fn builder() -> BpeTrainerBuilder { BpeTrainerBuilder::new() } /// Setup a progress bar if asked to show progress fn setup_progress(&self) -> Option<ProgressBar> { if self.show_progress { let p = ProgressBar::new(0); p.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"), ); Some(p) } else { None } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.set_draw_delta(len as u64 / 100); p.reset(); } } /// Add the provided special tokens to the initial vocabulary fn add_special_tokens(&self, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>) { for token in &self.special_tokens { if !w2id.contains_key(&token.content) { id2w.push(token.content.to_owned()); w2id.insert(token.content.to_owned(), (id2w.len() - 1) as u32); } } } /// Compute the initial alphabet and limit it if relevant fn compute_alphabet( &self, wc: &HashMap<String, u32>, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>, ) { // Compute the alphabet from seen words let mut alphabet: HashMap<char, usize> = HashMap::new(); for (word, count) in wc { for c in word.chars() { alphabet .entry(c) .and_modify(|cnt| *cnt += *count as usize) .or_insert(*count as usize); } } // Also include anything from the provided initial alphabet for c in &self.initial_alphabet { alphabet .entry(*c) 
.and_modify(|cnt| *cnt = std::usize::MAX) .or_insert(std::usize::MAX); } let mut kept = alphabet.iter().collect::<Vec<_>>(); // Compute the number of chars to remove from the alphabet // If `limit_alphabet < initial_alphabet.len()`, some of these initial characters // will be removed let to_remove = self .limit_alphabet .map(|limit| { if alphabet.len() > limit { alphabet.len() - limit } else { 0 } }) .unwrap_or(0); // Remove the unwanted chars if to_remove > 0 { kept.sort_unstable_by_key(|k| *k.1); kept.drain(..to_remove); } // Keep the initial alphabet (sorted for determinism) kept.sort_unstable_by_key(|k| (*k.0) as u32); kept.into_iter().for_each(|(c, _)| { let s = c.to_string(); if !w2id.contains_key(&s) { id2w.push(s.clone()); w2id.insert(s, (id2w.len() - 1) as u32); } }); } /// Tokenize words and add subwords to the vocabulary when relevant fn tokenize_words( &self, wc: &HashMap<String, u32>, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>, p: &Option<ProgressBar>, ) -> (Vec<Word>, Vec<u32>) { let mut words: Vec<Word> = Vec::with_capacity(wc.len()); let mut counts: Vec<u32> = Vec::with_capacity(wc.len()); for (word, count) in wc { let mut current_word = Word::new(); counts.push(*count); for (is_first, is_last, c) in word.chars().with_first_and_last() { let mut s = c.to_string(); if w2id.contains_key(&s) { // Found the initial char in the authorized alphabet // Add the `continuing_subword_prefix` if relevant if !is_first { if let Some(prefix) = &self.continuing_subword_prefix { s = format!("{}{}", prefix, s); } } // Add the `end_of_word_suffix` if relevant if is_last { if let Some(suffix) = &self.end_of_word_suffix { s = format!("{}{}", s, suffix); } } // Insert the new formed string if necessary if !w2id.contains_key(&s) { id2w.push(s.clone()); w2id.insert(s.clone(), (id2w.len() - 1) as u32); } current_word.add(w2id[&s], 1); // We do not care about the len here } } words.push(current_word); if let Some(p) = p { p.inc(1); } } (words, counts) } fn count_pairs( &self, words: &[Word], counts: &[u32], p: &Option<ProgressBar>, ) -> (HashMap<Pair, i32>, HashMap<Pair, HashSet<usize>>) { words .maybe_par_iter() .enumerate() .map(|(i, word)| { let mut pair_counts = HashMap::new(); let mut where_to_update: HashMap<Pair, HashSet<usize>> = HashMap::new(); for window in word.get_chars().windows(2) { let cur_pair: Pair = (window[0], window[1]); // Initialize pair_counts and where_to_update for this pair if we just saw it if !pair_counts.contains_key(&cur_pair) { pair_counts.insert(cur_pair, 0); } // Then update counts let count = counts[i]; where_to_update .entry(cur_pair) .and_modify(|h| { h.insert(i); }) .or_insert_with(|| { let mut h = HashSet::new(); h.insert(i); h }); *pair_counts.get_mut(&cur_pair).unwrap() += count as i32; } if let Some(p) = &p { p.inc(1); } (pair_counts, where_to_update) }) .reduce( || (HashMap::new(), HashMap::new()), |(mut pair_counts, mut where_to_update), (pc, wtu)| { for (k, v) in pc { pair_counts.entry(k).and_modify(|c| *c += v).or_insert(v); } for (k, v) in wtu { where_to_update .entry(k) .and_modify(|set| *set = set.union(&v).copied().collect()) .or_insert(v); } (pair_counts, where_to_update) }, ) } pub fn do_train( &self, word_counts: &HashMap<String, u32>, model: &mut BPE, ) -> Result<Vec<AddedToken>> { let mut word_to_id: HashMap<String, u32> = HashMap::with_capacity(self.vocab_size); let mut id_to_word: Vec<String> = Vec::with_capacity(self.vocab_size); let max_token_length: usize = self.max_token_length.unwrap_or(usize::MAX); let progress = 
self.setup_progress(); // // 1. Add all special tokens to the vocabulary // self.add_special_tokens(&mut word_to_id, &mut id_to_word); // // 2. Compute the initial alphabet // self.compute_alphabet(word_counts, &mut word_to_id, &mut id_to_word); // // 3. Tokenize words // self.update_progress(&progress, word_counts.len(), "Tokenize words"); let (words, counts) = self.tokenize_words(word_counts, &mut word_to_id, &mut id_to_word, &progress); self.finalize_progress(&progress, words.len()); // // 4. Count pairs in words // self.update_progress(&progress, words.len(), "Count pairs"); let (mut pair_counts, mut where_to_update) = self.count_pairs(&words, &counts, &progress); // Insert them in the queue let mut queue = BinaryHeap::with_capacity(pair_counts.len()); where_to_update.drain().for_each(|(pair, pos)| { let count = pair_counts[&pair]; if count > 0 { queue.push(Merge { pair, count: count as u32, pos, }); } }); self.finalize_progress(&progress, words.len()); // // 5. Do merges // self.update_progress(&progress, self.vocab_size, "Compute merges"); let mut merges: Vec<(Pair, u32)> = vec![]; loop { // Stop as soon as we have a big enough vocabulary if word_to_id.len() >= self.vocab_size { break; } if queue.is_empty() { break; } let mut top = queue.pop().unwrap(); if top.count != pair_counts[&top.pair] as u32 { top.count = pair_counts[&top.pair] as u32; queue.push(top); continue; } if top.count < 1 || self.min_frequency > top.count { break; } let part_a = &id_to_word[top.pair.0 as usize]; let mut part_b = id_to_word[top.pair.1 as usize].to_owned(); // Build new token if let Some(prefix) = &self.continuing_subword_prefix { if part_b.starts_with(prefix) { let prefix_byte_len = prefix.chars().map(|c| c.len_utf8()).sum(); part_b = part_b[prefix_byte_len..].to_string(); } } let new_token = format!("{}{}", part_a, part_b); // implement sentencepiece-like merge. // if this code were to be merged, integrate a way in the python bindings to communicate this variable // default should be 0/None to maintain previous behavior. 16 is the spm default. // Insert new token if it does not already exist let new_token_id = word_to_id .get(&new_token) .copied() .unwrap_or(id_to_word.len() as u32); if word_to_id.get(&new_token).is_none() { id_to_word.push(new_token.clone()); word_to_id.insert(new_token.clone(), new_token_id); } merges.push((top.pair, new_token_id)); // Merge the new pair in every words let changes = top .pos .maybe_par_iter() .flat_map(|i| { let w = &words[*i] as *const _ as *mut _; // We can merge each of these words in parallel here because each position // can be there only once (HashSet). So this is safe. 
unsafe { let word: &mut Word = &mut (*w); word.merge(top.pair.0, top.pair.1, new_token_id, max_token_length) .into_iter() .map(|c| (c, *i)) .collect::<Vec<_>>() } }) .collect::<Vec<_>>(); // Introduce new formed pairs for ((pair, change), iw) in changes { let count = change * counts[iw] as i32; pair_counts .entry(pair) .and_modify(|c| *c += count) .or_insert(count); if change > 0 { where_to_update .entry(pair) .and_modify(|h| { h.insert(iw); }) .or_insert_with(|| { let mut h = HashSet::new(); h.insert(iw); h }); } } where_to_update.drain().for_each(|(pair, pos)| { let count = pair_counts[&pair]; if count > 0 { queue.push(Merge { pair, count: count as u32, pos, }); } }); if let Some(p) = &progress { p.inc(1); } } self.finalize_progress(&progress, merges.len()); // Transfer new vocab & options to model model.vocab = word_to_id; model.vocab_r = model .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); model.merges = merges .into_iter() .enumerate() .map(|(i, (pair, new_token_id))| (pair, (i as u32, new_token_id))) .collect(); if let Some(prefix) = &self.continuing_subword_prefix { model.continuing_subword_prefix = Some(prefix.to_owned()); } else { model.continuing_subword_prefix = None; } if let Some(suffix) = &self.end_of_word_suffix { model.end_of_word_suffix = Some(suffix.to_owned()); } else { model.end_of_word_suffix = None; } Ok(self.special_tokens.clone()) } } impl Trainer for BpeTrainer { type Model = BPE; /// Train a BPE model fn train(&self, model: &mut BPE) -> Result<Vec<AddedToken>> { self.do_train(&self.words, model) } /// Whether we should show progress fn should_show_progress(&self) -> bool { self.show_progress } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { let words: Result<HashMap<String, u32>> = iterator .maybe_par_bridge() .map(|sequence| { let words = process(sequence.as_ref())?; let mut map = HashMap::new(); for word in words { map.entry(word).and_modify(|c| *c += 1).or_insert(1); } Ok(map) }) .reduce( || Ok(HashMap::new()), |acc, ws| { let mut acc = acc?; for (k, v) in ws? { acc.entry(k).and_modify(|c| *c += v).or_insert(v); } Ok(acc) }, ); self.words = words?; Ok(()) } } #[cfg(test)] mod tests { use super::{BpeTrainer, Pair, BPE}; use std::collections::HashMap; #[test] fn test_train() { let word_counts: HashMap<String, u32> = [ ("roses".into(), 1), ("are".into(), 2), ("red".into(), 1), ("voilets".into(), 1), ("blue".into(), 1), ("BERT".into(), 1), ("is".into(), 2), ("big".into(), 1), ("and".into(), 1), ("so".into(), 1), ("GPT-2".into(), 1), ] .iter() .cloned() .collect(); let trainer = BpeTrainer::builder() .show_progress(false) .min_frequency(2) .build(); let mut model = BPE::default(); trainer.do_train(&word_counts, &mut model).unwrap(); // Vocab should contain all of the characters from the `word_counts` mapping // as well as three merges: 're', 'are', and 'is'. 
let expected_vocab: HashMap<String, u32> = [ ("-".into(), 0), ("2".into(), 1), ("B".into(), 2), ("E".into(), 3), ("G".into(), 4), ("P".into(), 5), ("R".into(), 6), ("T".into(), 7), ("a".into(), 8), ("b".into(), 9), ("d".into(), 10), ("e".into(), 11), ("g".into(), 12), ("i".into(), 13), ("l".into(), 14), ("n".into(), 15), ("o".into(), 16), ("r".into(), 17), ("s".into(), 18), ("t".into(), 19), ("u".into(), 20), ("v".into(), 21), ("re".into(), 22), ("are".into(), 23), ("is".into(), 24), ] .iter() .cloned() .collect(); assert_eq!(model.vocab, expected_vocab); // The keys in `merges` are pairs of symbols, the values are tuples of (rank, id), // where 'rank' determines the order in which this merge will be applied during // tokenization, and 'id' is the vocab id of the symbol resulting from merging // the pair of symbols in the corresponding key. let expected_merges: HashMap<Pair, (u32, u32)> = [ ((17, 11), (0, 22)), // 'r' + 'e' -> 're' ((8, 22), (1, 23)), // 'a' + 're' -> 'are' ((13, 18), (2, 24)), // 'i' + 's' -> 'is' ] .iter() .cloned() .collect(); assert_eq!(model.merges, expected_merges); } #[test] fn bpe_test_max_token_length_16() { /* bpe_test_max_token_length series of tests test the max_token_length flag of bpetrainer // this is the more robust version that only tests max length of learned tokens // (pre) tokenizer settings or vocab can be easily modified when necessary */ let max_token_length = 16; let long_word_counts: HashMap<String, u32> = [ ("singlelongtokenwithoutcasechange", 2), ("singleLongTokenWithCamelCaseChange", 2), ("Longsingletokenwithpunctu@t!onwithin", 2), ("Anotherlongsingletokenwithnumberw1th1n", 2), ("짧은한글문자열짧은한", 2), // korean 10 char ("긴한글문자열긴한글문자열긴한글문", 2), // korean 16 char ("短字符串短字符串短字", 2), //simplified chinese 10 char ("长字符串长字符串长字符串长字符串", 2), // simp. chinese 16 char ("短い文字列短い文字列", 2), // japanese 10 char ("長い文字列長い文字列長い文字列長", 2), // japanese 16 char ("so", 2), ("GPT-2", 2), ] .iter() .map(|(key, value)| (key.to_string(), *value)) .collect(); let trainer = BpeTrainer::builder() .max_token_length(Some(max_token_length)) .show_progress(false) .min_frequency(0) .build(); let mut model = BPE::default(); trainer.do_train(&long_word_counts, &mut model).unwrap(); let vocab = model.get_vocab(); for token in vocab.keys() { assert!( token.chars().count() <= max_token_length, "token too long : {} , chars().count() = {}", token, token.chars().count() ) } } #[test] fn bpe_test_max_token_length_direct_assert() { /* more direct version of bpe_test_max_token_length test // directly compares tokens with known expected values. // maybe unstable depending on specific settings or changes. 
*/ let long_word_counts: HashMap<String, u32> = [ ("sin", 2), ("Sin", 2), ("Lon", 2), ("Ano", 2), ("짧은한", 2), ("긴한글", 2), ("短字符", 2), ("长字符", 2), ("短い文", 2), ("長い文", 2), ("so", 2), ("GP", 2), ] .iter() .map(|(key, value)| (key.to_string(), *value)) .collect(); let trainer = BpeTrainer::builder() .max_token_length(Some(2)) .show_progress(false) .min_frequency(0) .build(); let mut model = BPE::default(); trainer.do_train(&long_word_counts, &mut model).unwrap(); let trained_vocab: HashMap<String, u32> = model.get_vocab(); let expected_vocab: HashMap<String, u32> = [ ("短", 12), ("n", 6), ("i", 5), ("s", 8), ("字符", 23), ("長", 14), ("긴", 17), ("い文", 22), ("L", 2), ("in", 21), ("o", 7), ("은한", 29), ("S", 4), ("P", 3), ("so", 27), ("符", 13), ("文", 11), ("字", 10), ("짧", 19), ("GP", 25), ("글", 16), ("G", 1), ("An", 24), ("长", 15), ("A", 0), ("Lo", 26), ("긴한", 28), ("い", 9), ("한", 20), ("은", 18), ] .iter() .cloned() .map(|(k, v)| (k.to_string(), v)) .collect(); assert_eq!(trained_vocab, expected_vocab) } }
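// A minimal, illustrative sketch of configuring `BpeTrainer` through its builder
// (`vocab_size`, `limit_alphabet` and `initial_alphabet` are the options documented on
// the struct above). The corpus and the forced '!' character are made up for the
// example; only coarse properties of the result are asserted, not specific merges.
#[cfg(test)]
mod builder_usage_sketch {
    use super::{BpeTrainer, BPE};
    use std::collections::{HashMap, HashSet};

    #[test]
    fn builder_options_sketch() {
        let word_counts: HashMap<String, u32> =
            [("hello".into(), 3), ("hell".into(), 2), ("help".into(), 2)]
                .iter()
                .cloned()
                .collect();

        // Force '!' into the alphabet even though it never appears in the corpus.
        let forced: HashSet<char> = ['!'].iter().copied().collect();

        let trainer = BpeTrainer::builder()
            .show_progress(false)
            .min_frequency(2)
            .vocab_size(30)
            .limit_alphabet(100)
            .initial_alphabet(forced)
            .build();

        let mut model = BPE::default();
        trainer.do_train(&word_counts, &mut model).unwrap();

        // The forced initial alphabet survives training, and the vocabulary is
        // bounded by the requested `vocab_size`.
        assert!(model.vocab.contains_key("!"));
        assert!(model.vocab.len() <= 30);
    }
}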
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/bpe/word.rs
use super::Pair; use rand::{thread_rng, Rng}; use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap}; #[derive(Debug, Eq)] struct Merge { pos: usize, rank: u32, new_id: u32, } impl PartialEq for Merge { fn eq(&self, other: &Self) -> bool { self.rank == other.rank && self.pos == other.pos } } impl PartialOrd for Merge { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { // By manually implementing this, we make the containing BinaryHeap a // min-heap ordered first on the rank, and the pos otherwise if self.rank != other.rank { Some(other.rank.cmp(&self.rank)) } else { Some(other.pos.cmp(&self.pos)) } } } impl Ord for Merge { fn cmp(&self, other: &Self) -> Ordering { self.partial_cmp(other).unwrap() } } #[derive(Debug, Clone, Copy)] struct Symbol { c: u32, prev: isize, next: isize, len: usize, } impl Symbol { /// Merges the current Symbol with the other one. /// In order to update prev/next, we consider Self to be the Symbol on the left, /// and other to be the next one on the right. pub fn merge_with(&mut self, other: &Self, new_c: u32) { self.c = new_c; self.len += other.len; self.next = other.next; } } #[derive(Clone, Default)] pub(super) struct Word { symbols: Vec<Symbol>, } impl std::fmt::Debug for Word { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Word") .field( "chars", &self .symbols .iter() .map(|s| s.c.to_string()) .collect::<Vec<_>>() .join(" "), ) .field("symbols", &self.symbols) .finish() } } impl Word { pub(super) fn new() -> Self { Word { symbols: vec![] } } pub(super) fn with_capacity(capacity: usize) -> Self { Self { symbols: Vec::with_capacity(capacity), } } pub(super) fn add(&mut self, c: u32, byte_len: usize) { let (prev, next) = { let len = self.symbols.len() as isize; if let Some(last) = self.symbols.last_mut() { // Update `next` on the previous one last.next = len; (len - 1, -1) } else { (-1, -1) } }; self.symbols.push(Symbol { c, prev, next, len: byte_len, }); } pub(super) fn merge( &mut self, c1: u32, c2: u32, replacement: u32, max_length: usize, ) -> Vec<(Pair, i32)> { let mut changes: Vec<(Pair, i32)> = vec![]; let mut i = 0; loop { if i >= self.symbols.len() { break; } // Found a pair if self.symbols[i].c == c1 && i + 1 < self.symbols.len() && self.symbols[i + 1].c == c2 { let first = self.symbols[i]; let second = self.symbols[i + 1]; // Remove in place let new_s = Symbol { c: replacement, prev: first.prev, next: second.next, len: first.len + second.len, }; // If there are other characters before the pair if i > 0 { changes.push(((self.symbols[i - 1].c, first.c), -1)); if self.symbols[i - 1].len + new_s.len < max_length { changes.push(((self.symbols[i - 1].c, replacement), 1)); } } self.symbols.insert(i, new_s); // Insert replacement before first char of pair self.symbols.remove(i + 1); // Remove first char of pair self.symbols.remove(i + 1); // And then the second // If there are other characters after the pair if i < self.symbols.len() - 1 { changes.push(((second.c, self.symbols[i + 1].c), -1)); if self.symbols[i + 1].len + new_s.len < max_length { changes.push(((replacement, self.symbols[i + 1].c), 1)); } } } i += 1; } changes } pub(super) fn merge_all(&mut self, merges: &HashMap<Pair, (u32, u32)>, dropout: Option<f32>) { let mut queue = BinaryHeap::with_capacity(self.symbols.len()); let mut skip = Vec::with_capacity(queue.len()); queue.extend( self.symbols .windows(2) .enumerate() .filter_map(|(index, window)| { let pair = (window[0].c, window[1].c); merges.get(&pair).map(|m| Merge { pos: index, 
rank: m.0, new_id: m.1, }) }), ); while let Some(top) = queue.pop() { if dropout .map(|d| thread_rng().gen::<f32>() < d) .unwrap_or(false) { skip.push(top); } else { // Re-insert the skipped elements queue.extend(skip.drain(..)); if self.symbols[top.pos].len == 0 { continue; } // Do nothing if we are the last symbol if self.symbols[top.pos].next == -1 { continue; } let next_pos = self.symbols[top.pos].next as usize; let right = self.symbols[next_pos]; // Make sure we are not processing an expired queue entry let target_new_pair = (self.symbols[top.pos].c, right.c); if !merges .get(&target_new_pair) .map_or(false, |(_, new_id)| *new_id == top.new_id) { continue; } // Otherwise, let's merge self.symbols[top.pos].merge_with(&right, top.new_id); // Tag the right part as removed self.symbols[next_pos].len = 0; // Update `prev` on the new `next` to the current pos if right.next > -1 && (right.next as usize) < self.symbols.len() { self.symbols[right.next as usize].prev = top.pos as isize; } // Insert the new pair formed with the previous symbol let current = &self.symbols[top.pos]; if current.prev >= 0 { let prev = current.prev as usize; let prev_symbol = self.symbols[prev]; let new_pair = (prev_symbol.c, current.c); if let Some((rank, new_id)) = merges.get(&new_pair) { queue.push(Merge { pos: current.prev as usize, rank: *rank, new_id: *new_id, }); } } // Insert the new pair formed with the next symbol let next = current.next as usize; if next < self.symbols.len() { let next_symbol = self.symbols[next]; let new_pair = (current.c, next_symbol.c); if let Some((rank, new_id)) = merges.get(&new_pair) { queue.push(Merge { pos: top.pos, rank: *rank, new_id: *new_id, }); } } } } // Filter out the removed symbols self.symbols.retain(|s| s.len != 0); } pub(super) fn get_chars(&self) -> Vec<u32> { self.symbols.iter().map(|s| s.c).collect() } pub(super) fn get_chars_iter(&self) -> impl Iterator<Item = u32> + '_ { self.symbols.iter().map(|s| s.c) } pub(super) fn get_offsets_iter(&self) -> impl Iterator<Item = (usize, usize)> + '_ { let mut pos = 0; self.symbols.iter().map(move |symbol| { let new_pos = pos + symbol.len; let offset = (pos, new_pos); pos = new_pos; offset }) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_merge() { // Let's say we have the word 'hello' and a word-to-id vocab that looks // like this: {'h': 0, 'e': 1, 'l': 2, 'o': 3}. let mut word = Word::new(); word.add(0, 1); // 'h' word.add(1, 1); // 'e' word.add(2, 1); // 'l' word.add(2, 1); // 'l' word.add(3, 1); // 'o' // We're going to perform a merge on the pair ('l', 'l') ~= (2, 2). Let's // say that 'll' has the ID of 4 in the updated word-to-id vocab. let changes = word.merge(2, 2, 4, usize::MAX); // So the word should now look like this: assert_eq!( word.get_chars(), &[ 0u32, // 'h' 1u32, // 'e' 4u32, // 'll' 3u32, // 'o' ] ); // The return value `changes` will be used to update the pair counts during // training. This merge affects the counts for the pairs // ('e', 'l') ~= (1, 2), // ('e', 'll') ~= (1, 4), // ('l', 'o') ~= (2, 3), and // ('ll', 'o') ~= (4, 3). // So the changes should reflect that: assert_eq!( changes, &[ ((1u32, 2u32), -1i32), // count for ('e', 'l') should be decreased by 1. ((1u32, 4u32), 1i32), // count for ('e', 'll') should be increased by 1. ((2u32, 3u32), -1i32), // count for ('l', 'o') should be decreased by 1. ((4u32, 3u32), 1i32), // count for ('ll', 'o') should be increased by 1. 
] ); } #[test] fn test_merge_max_length() { // Let's say we have the word 'hello' and a word-to-id vocab that looks // like this: {'h': 0, 'e': 1, 'l': 2, 'o': 3}. let mut word = Word::new(); word.add(0, 1); // 'h' word.add(1, 1); // 'e' word.add(2, 1); // 'l' word.add(2, 1); // 'l' word.add(3, 1); // 'o' // We're going to perform a merge on the pair ('l', 'l') ~= (2, 2). Let's // say that 'll' has the ID of 4 in the updated word-to-id vocab. let changes = word.merge(2, 2, 4, 2); assert_eq!( word.get_chars(), &[ 0u32, // 'h' 1u32, // 'e' 4u32, // 'll' 3u32, // 'o' ] ); assert_eq!( changes, &[ ((1u32, 2u32), -1i32), // count for ('e', 'l') should be decreased by 1. // ((1u32, 4u32), 1i32), Missing since this would be larger than 2 ((2u32, 3u32), -1i32), // count for ('l', 'o') should be decreased by 1. // ((4u32, 3u32), 1i32), Missing since this would be larger than 2 ] ); } }
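// A minimal sketch of `merge_all`, the inference-time counterpart of `merge` used by
// the BPE model: the merges map gives ('l','l') rank 0 and ('ll','o') rank 1, and the
// lower rank is applied first. The ids and the word are made up for the example; no
// dropout is used.
#[cfg(test)]
mod merge_all_sketch {
    use super::Word;
    use std::collections::HashMap;

    #[test]
    fn merge_all_applies_lowest_rank_first() {
        // Same toy vocab as above: 'h' = 0, 'e' = 1, 'l' = 2, 'o' = 3.
        let mut word = Word::new();
        word.add(0, 1); // 'h'
        word.add(1, 1); // 'e'
        word.add(2, 1); // 'l'
        word.add(2, 1); // 'l'
        word.add(3, 1); // 'o'

        // (pair) -> (rank, new_id): ('l','l') -> 'll' (id 4), then ('ll','o') -> 'llo' (id 5).
        let merges: HashMap<(u32, u32), (u32, u32)> = [((2, 2), (0, 4)), ((4, 3), (1, 5))]
            .iter()
            .cloned()
            .collect();

        word.merge_all(&merges, None);

        // 'h', 'e', 'llo'
        assert_eq!(word.get_chars(), &[0u32, 1u32, 5u32]);
    }
}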
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/mod.rs
//! [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) //! model. use crate::models::bpe::BPE; use crate::tokenizer::{Model, Result, Token}; use std::{ borrow::Cow, collections::HashMap, fs::File, io::prelude::*, io::{BufRead, BufReader}, path::{Path, PathBuf}, }; mod serialization; mod trainer; pub use trainer::*; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("WordPiece error: Missing [UNK] token from the vocabulary")] MissingUnkToken, } type Vocab = HashMap<String, u32>; type VocabR = HashMap<u32, String>; struct Config { files: Option<String>, vocab: Vocab, unk_token: String, continuing_subword_prefix: String, max_input_chars_per_word: usize, } /// A `WordPieceBuilder` can be used to create a `WordPiece` model with a custom configuration. pub struct WordPieceBuilder { config: Config, } impl Default for WordPieceBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: HashMap::new(), unk_token: String::from("[UNK]"), continuing_subword_prefix: String::from("##"), max_input_chars_per_word: 100, }, } } } impl WordPieceBuilder { /// Construct a new `WordPieceBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String) -> Self { self.config.files = Some(vocab); self } /// Set the vocab (token -> ID) mapping. #[must_use] pub fn vocab(mut self, vocab: Vocab) -> Self { self.config.vocab = vocab; self } /// The the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = unk_token; self } /// Set the prefix for continuing subwords. #[must_use] pub fn continuing_subword_prefix(mut self, continuing_subword_prefix: String) -> Self { self.config.continuing_subword_prefix = continuing_subword_prefix; self } /// Set the maximum number of input characters per word. #[must_use] pub fn max_input_chars_per_word(mut self, max_input_chars_per_word: usize) -> Self { self.config.max_input_chars_per_word = max_input_chars_per_word; self } /// Contructs a `WordPiece` model that uses the `WordPieceBuilder`'s configuration. pub fn build(mut self) -> Result<WordPiece> { if let Some(vocab) = self.config.files { self.config.vocab = WordPiece::read_file(&vocab)?; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); Ok(WordPiece { vocab: self.config.vocab, vocab_r, unk_token: self.config.unk_token, continuing_subword_prefix: self.config.continuing_subword_prefix, max_input_chars_per_word: self.config.max_input_chars_per_word, }) } } /// A /// [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) /// model. #[derive(Clone, PartialEq, Eq)] pub struct WordPiece { vocab: Vocab, vocab_r: VocabR, pub unk_token: String, pub continuing_subword_prefix: String, pub max_input_chars_per_word: usize, } impl std::fmt::Debug for WordPiece { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("WordPiece") .field("unk_token", &self.unk_token) .field("continuing_subword_prefix", &self.continuing_subword_prefix) .field("max_input_chars_per_word", &self.max_input_chars_per_word) .field("vocab", &self.vocab.len()) .finish() } } impl Default for WordPiece { fn default() -> Self { Self { vocab: HashMap::new(), vocab_r: HashMap::new(), unk_token: String::from("[UNK]"), continuing_subword_prefix: String::from("##"), max_input_chars_per_word: 100, } } } impl WordPiece { /// Get a `WordPieceBuilder`. 
pub fn builder() -> WordPieceBuilder { WordPieceBuilder::new() } /// Read the given files to extract the vocab pub fn read_file(vocab: &str) -> Result<Vocab> { let file = File::open(vocab)?; let file = BufReader::new(file); let mut vocab = HashMap::new(); for (index, line) in file.lines().enumerate() { let line = line?; vocab.insert(line.trim_end().to_owned(), index as u32); } Ok(vocab) } /// Initialize a `WordPiece` model from a vocab mapping file. pub fn from_file(vocab: &str) -> WordPieceBuilder { WordPiece::builder().files(vocab.to_owned()) } /// Create a `WordPiece` model from a `BPE` model. pub fn from_bpe(bpe: &BPE) -> Self { let mut wp = Self::builder().vocab(bpe.get_vocab()).build().unwrap(); if let Some(unk) = bpe.get_unk_token() { wp.unk_token = unk.to_owned(); } if let Some(prefix) = bpe.get_continuing_subword_prefix() { wp.continuing_subword_prefix = prefix.to_owned(); } wp } } impl Model for WordPiece { type Trainer = WordPieceTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> { let char_len = sequence.chars().count(); if char_len > self.max_input_chars_per_word { return Ok(vec![Token { value: self.unk_token.clone(), id: *self .vocab .get(&self.unk_token) .ok_or(Error::MissingUnkToken)?, offsets: (0, sequence.len()), }]); } let mut is_bad = false; let mut start = 0; let mut sub_tokens: Vec<Token> = vec![]; while start < sequence.len() { let mut end = sequence.len(); let mut cur_str = None; while start < end { let mut substr: Cow<str> = Cow::Borrowed(&sequence[start..end]); if start > 0 { substr = Cow::Owned(format!("{}{}", self.continuing_subword_prefix, substr)); } if self.vocab.contains_key(substr.as_ref()) { cur_str = Some(Token { id: self.vocab[substr.as_ref()], value: substr.to_string(), offsets: (start, end), }); break; } end -= substr.chars().last().map_or(1, |c| c.len_utf8()); } if cur_str.is_none() { is_bad = true; break; } sub_tokens.push(cur_str.unwrap()); start = end; } if is_bad { Ok(vec![Token { value: self.unk_token.clone(), id: *self .vocab .get(&self.unk_token) .ok_or(Error::MissingUnkToken)?, offsets: (0, sequence.len()), }]) } else { Ok(sub_tokens) } } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{}-vocab.txt", name), None => "vocab.txt".to_string(), }; // Write vocab.txt let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let mut vocab: Vec<(&String, &u32)> = self.vocab.iter().collect(); vocab.sort_unstable_by_key(|k| *k.1); vocab_file.write_all( &vocab .into_iter() .flat_map(|(token, _)| format!("{}\n", token).as_bytes().to_owned()) .collect::<Vec<_>>()[..], )?; Ok(vec![vocab_path]) } fn get_trainer(&self) -> Self::Trainer { WordPieceTrainer::builder().build() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_error_display() { assert!(format!("{}", Error::MissingUnkToken).contains("Missing [UNK] token")); } }
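// An illustrative sketch of the greedy, longest-match-first lookup implemented in
// `tokenize` above: "unaffable" splits into "un", "##aff", "##able", and a word with
// no matching pieces falls back to the unknown token. The vocabulary is made up for
// the example.
#[cfg(test)]
mod tokenize_sketch {
    use super::WordPiece;
    use crate::tokenizer::Model;
    use std::collections::HashMap;

    #[test]
    fn tokenize_with_continuation_prefix() {
        let vocab: HashMap<String, u32> = [
            ("[UNK]".into(), 0),
            ("un".into(), 1),
            ("##aff".into(), 2),
            ("##able".into(), 3),
        ]
        .iter()
        .cloned()
        .collect();

        let wp = WordPiece::builder().vocab(vocab).build().unwrap();

        let tokens = wp.tokenize("unaffable").unwrap();
        let values: Vec<String> = tokens.iter().map(|t| t.value.clone()).collect();
        assert_eq!(values, vec!["un", "##aff", "##able"]);

        // No piece of "xyz" is in the vocabulary, so the whole word maps to "[UNK]".
        let tokens = wp.tokenize("xyz").unwrap();
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].value, "[UNK]");
    }
}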
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/serialization.rs
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder}; use serde::{ de::{MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use std::collections::HashSet; impl Serialize for WordPiece { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("WordPiece", 5)?; // Small fields first model.serialize_field("type", "WordPiece")?; model.serialize_field("unk_token", &self.unk_token)?; model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?; model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?; // Then large ones let ordered_vocab = OrderedVocabIter::new(&self.vocab_r); model.serialize_field("vocab", &ordered_vocab)?; model.end() } } impl<'de> Deserialize<'de> for WordPiece { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "WordPiece", &[ "type", "unk_token", "continuing_subword_prefix", "max_input_chars_per_word", "vocab", ], WordPieceVisitor, ) } } struct WordPieceVisitor; impl<'de> Visitor<'de> for WordPieceVisitor { type Value = WordPiece; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct WordPiece") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut builder = WordPieceBuilder::new(); let mut missing_fields = vec![ // for retrocompatibility the "type" field is not mandatory "unk_token", "continuing_subword_prefix", "max_input_chars_per_word", "vocab", ] .into_iter() .collect::<HashSet<_>>(); while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "unk_token" => builder = builder.unk_token(map.next_value()?), "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(map.next_value()?) } "max_input_chars_per_word" => { builder = builder.max_input_chars_per_word(map.next_value()?) } "vocab" => builder = builder.vocab(map.next_value()?), "type" => match map.next_value()? { "WordPiece" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"WordPiece", )) } }, _ => {} } missing_fields.remove::<str>(&key); } if !missing_fields.is_empty() { Err(serde::de::Error::missing_field( missing_fields.iter().next().unwrap(), )) } else { Ok(builder.build().map_err(serde::de::Error::custom)?) } } } #[cfg(test)] mod tests { use super::*; #[test] fn serde() { let wp = WordPiece::default(); let wp_s = "{\ \"type\":\"WordPiece\",\ \"unk_token\":\"[UNK]\",\ \"continuing_subword_prefix\":\"##\",\ \"max_input_chars_per_word\":100,\ \"vocab\":{}\ }"; assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s); assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp); } #[test] fn deserialization_should_fail() { let missing_unk = "{\ \"type\":\"WordPiece\",\ \"continuing_subword_prefix\":\"##\",\ \"max_input_chars_per_word\":100,\ \"vocab\":{}\ }"; assert!(serde_json::from_str::<WordPiece>(missing_unk) .unwrap_err() .to_string() .starts_with("missing field `unk_token`")); let wrong_type = "{\ \"type\":\"WordLevel\",\ \"unk_token\":\"[UNK]\",\ \"vocab\":{}\ }"; assert!(serde_json::from_str::<WordPiece>(wrong_type) .unwrap_err() .to_string() .starts_with("invalid value: string \"WordLevel\", expected WordPiece")); } }
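// A small sketch of the retrocompatibility path handled by the visitor above: the
// "type" field is optional, so an older serialization that omits it still
// deserializes into the default model. The JSON payload is made up for the example.
#[cfg(test)]
mod retrocompat_sketch {
    use super::*;

    #[test]
    fn deserialize_without_type_field() {
        let legacy =
            r#"{"unk_token":"[UNK]","continuing_subword_prefix":"##","max_input_chars_per_word":100,"vocab":{}}"#;
        let wp = serde_json::from_str::<WordPiece>(legacy).unwrap();
        assert_eq!(wp, WordPiece::default());
    }
}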
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/trainer.rs
use super::WordPiece; use crate::models::bpe::{BpeTrainer, BpeTrainerBuilder, BPE}; use crate::tokenizer::{AddedToken, Result, Trainer}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; /// A `WordPieceTrainerBuilder` can be used to create a `WordPieceTrainer` with a custom /// configuration. pub struct WordPieceTrainerBuilder { bpe_trainer_builder: BpeTrainerBuilder, } impl Default for WordPieceTrainerBuilder { fn default() -> Self { Self { bpe_trainer_builder: BpeTrainerBuilder::new().continuing_subword_prefix("##".into()), } } } impl WordPieceTrainerBuilder { /// Constructs a new `WordPieceTrainerBuilder` pub fn new() -> Self { Self::default() } /// Set the expected minimum frequency #[must_use] pub fn min_frequency(mut self, frequency: u32) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.min_frequency(frequency); self } /// Set the vocabulary size #[must_use] pub fn vocab_size(mut self, size: usize) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.vocab_size(size); self } /// Set whether to show progress #[must_use] pub fn show_progress(mut self, show: bool) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.show_progress(show); self } /// Set the special tokens #[must_use] pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.special_tokens(tokens); self } /// Set whether to limit the alphabet #[must_use] pub fn limit_alphabet(mut self, limit: usize) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.limit_alphabet(limit); self } /// Set the initial alphabet #[must_use] pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.initial_alphabet(alphabet); self } /// Set the continuing_subword_prefix #[must_use] pub fn continuing_subword_prefix(mut self, prefix: String) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.continuing_subword_prefix(prefix); self } /// Set the end_of_word_suffix #[must_use] pub fn end_of_word_suffix(mut self, suffix: String) -> Self { self.bpe_trainer_builder = self.bpe_trainer_builder.end_of_word_suffix(suffix); self } /// Constructs the final BpeTrainer pub fn build(self) -> WordPieceTrainer { let bpe_trainer = self.bpe_trainer_builder.build(); WordPieceTrainer { bpe_trainer } } } /// Trains a `WordPiece` model. 
#[derive(Default, Deserialize, Serialize)] pub struct WordPieceTrainer { bpe_trainer: BpeTrainer, } impl WordPieceTrainer { pub fn min_frequency(&self) -> u32 { self.bpe_trainer.min_frequency } pub fn set_min_frequency(&mut self, freq: u32) { self.bpe_trainer.min_frequency = freq; } pub fn vocab_size(&self) -> usize { self.bpe_trainer.vocab_size } pub fn set_vocab_size(&mut self, size: usize) { self.bpe_trainer.vocab_size = size; } pub fn show_progress(&self) -> bool { self.bpe_trainer.show_progress } pub fn set_show_progress(&mut self, show_progress: bool) { self.bpe_trainer.show_progress = show_progress; } pub fn special_tokens(&self) -> &[AddedToken] { &self.bpe_trainer.special_tokens } pub fn set_special_tokens(&mut self, special_tokens: Vec<AddedToken>) { self.bpe_trainer.special_tokens = special_tokens; } pub fn limit_alphabet(&self) -> Option<usize> { self.bpe_trainer.limit_alphabet } pub fn set_limit_alphabet(&mut self, limit: Option<usize>) { self.bpe_trainer.limit_alphabet = limit; } pub fn initial_alphabet(&self) -> &HashSet<char> { &self.bpe_trainer.initial_alphabet } pub fn set_initial_alphabet(&mut self, alphabet: HashSet<char>) { self.bpe_trainer.initial_alphabet = alphabet; } pub fn continuing_subword_prefix(&self) -> &Option<String> { &self.bpe_trainer.continuing_subword_prefix } pub fn set_continuing_subword_prefix(&mut self, prefix: Option<String>) { self.bpe_trainer.continuing_subword_prefix = prefix; } pub fn end_of_word_suffix(&self) -> &Option<String> { &self.bpe_trainer.end_of_word_suffix } pub fn set_end_of_word_suffix(&mut self, suffix: Option<String>) { self.bpe_trainer.end_of_word_suffix = suffix; } pub fn builder() -> WordPieceTrainerBuilder { WordPieceTrainerBuilder::default() } pub fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> { let mut bpe = BPE::default(); let special_tokens = self.bpe_trainer.train(&mut bpe)?; let new_wordpiece = WordPiece::from_bpe(&bpe); // Transfer the vocab model.vocab = new_wordpiece.vocab; model.vocab_r = new_wordpiece.vocab_r; // The continuing_subword_prefix is the only other option to be overriden by the trainer model.continuing_subword_prefix = new_wordpiece.continuing_subword_prefix; Ok(special_tokens) } } impl Trainer for WordPieceTrainer { type Model = WordPiece; fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> { self.train(model) } fn should_show_progress(&self) -> bool { self.bpe_trainer.should_show_progress() } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { self.bpe_trainer.feed(iterator, process) } }
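// A sketch of the full training flow wrapped by `WordPieceTrainer`: feed raw
// sequences with a simple whitespace pre-split, train a `WordPiece` model, and check
// that non-initial pieces carry the default "##" prefix inherited from the inner
// `BpeTrainer`. The tiny corpus and the chosen builder values are made up for the
// example; only coarse properties of the result are asserted.
#[cfg(test)]
mod train_flow_sketch {
    use super::{WordPiece, WordPieceTrainer};
    use crate::tokenizer::{Model, Trainer};

    #[test]
    fn train_uses_continuing_subword_prefix() {
        let mut trainer = WordPieceTrainer::builder()
            .show_progress(false)
            .min_frequency(1)
            .vocab_size(40)
            .build();

        let corpus = ["low low low lower lowest", "new newer newest"];
        trainer
            .feed(corpus.iter(), |s| {
                Ok(s.split_whitespace().map(|w| w.to_string()).collect::<Vec<_>>())
            })
            .unwrap();

        let mut model = WordPiece::default();
        trainer.train(&mut model).unwrap();

        let vocab = model.get_vocab();
        assert!(!vocab.is_empty());
        // Pieces that do not start a word are prefixed with "##".
        assert!(vocab.keys().any(|k| k.starts_with("##")));
    }
}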
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/wordlevel/mod.rs
use super::OrderedVocabIter; use crate::tokenizer::{Model, Result, Token}; use serde_json::Value; use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, Read, Write}; use std::path::{Path, PathBuf}; mod serialization; mod trainer; // Re-export pub use trainer::*; type Vocab = HashMap<String, u32>; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("WordLevel error: Missing [UNK] token from the vocabulary")] MissingUnkToken, #[error("Bad vocabulary json file")] BadVocabulary, } struct Config { files: Option<String>, vocab: HashMap<String, u32>, unk_token: String, } /// A `WordLevelBuilder` can be used to create a `WordLevel` /// model with a custom configuration. pub struct WordLevelBuilder { config: Config, } impl Default for WordLevelBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: HashMap::new(), unk_token: String::from("<unk>"), }, } } } impl WordLevelBuilder { /// Construct a new `WordLevelBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String) -> Self { self.config.files = Some(vocab); self } /// Set the vocab (token -> ID) mapping. #[must_use] pub fn vocab(mut self, vocab: HashMap<String, u32>) -> Self { self.config.vocab = vocab; self } /// The the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = unk_token; self } /// Contructs a `WordLevel` model that uses the `WordLevelBuilder`'s configuration. pub fn build(mut self) -> Result<WordLevel> { if let Some(vocab) = self.config.files { self.config.vocab = WordLevel::read_file(&vocab)?; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); Ok(WordLevel { vocab: self.config.vocab, vocab_r, unk_token: self.config.unk_token, }) } } #[derive(PartialEq, Clone, Eq)] pub struct WordLevel { vocab: HashMap<String, u32>, vocab_r: HashMap<u32, String>, pub unk_token: String, } impl std::fmt::Debug for WordLevel { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("WordLevel") .field("unk_token", &self.unk_token) .field("vocab", &self.vocab.len()) .finish() } } impl WordLevel { pub fn builder() -> WordLevelBuilder { WordLevelBuilder::new() } pub fn read_file(vocab_path: &str) -> Result<Vocab> { let vocab_file = File::open(vocab_path)?; let mut vocab_file = BufReader::new(vocab_file); let mut buffer = String::new(); let mut vocab = HashMap::new(); vocab_file.read_to_string(&mut buffer)?; let json: Value = serde_json::from_str(&buffer)?; match json { Value::Object(m) => { for (token, id) in m { if let Value::Number(id) = id { let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32; vocab.insert(token, id); } } } _ => return Err(Box::new(Error::BadVocabulary)), }; Ok(vocab) } /// Initialize a WordLevel model from vocab and merges file. 
pub fn from_file(vocab_path: &str, unk_token: String) -> Result<WordLevel> { let vocab = WordLevel::read_file(vocab_path)?; Self::builder().vocab(vocab).unk_token(unk_token).build() } } impl Default for WordLevel { fn default() -> Self { Self { vocab: HashMap::new(), vocab_r: HashMap::new(), unk_token: String::from("<unk>"), } } } impl Model for WordLevel { type Trainer = WordLevelTrainer; fn tokenize(&self, token: &str) -> Result<Vec<Token>> { if let Some(&id) = self.vocab.get(token) { Ok(vec![Token { id, value: token.to_owned(), offsets: (0, token.len()), }]) } else if let Some(&unk_id) = self.vocab.get(&self.unk_token) { Ok(vec![Token { id: unk_id, value: self.unk_token.to_owned(), offsets: (0, token.len()), }]) } else { Err(Box::new(Error::MissingUnkToken)) } } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.keys().len() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{}-vocab.json", name), None => "vocab.json".to_string(), }; // Write vocab.json let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r); let serialized = serde_json::to_string(&order_vocab_iter)?; vocab_file.write_all(serialized.as_bytes())?; Ok(vec![vocab_path]) } fn get_trainer(&self) -> Self::Trainer { WordLevelTrainer::default() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_tokenize_unk() { let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)] .iter() .cloned() .collect(); let wordlevel = WordLevelBuilder::default() .vocab(vocab) .unk_token("<unk>".to_string()) .build() .unwrap(); let tokens = wordlevel.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = wordlevel.tokenize("a").unwrap(); assert_eq!(tokens, vec![Token::new(1u32, "a".into(), (0, 1)),]); } #[test] fn test_tokenize_missing_unk_token() { let vocab: Vocab = [("a".into(), 0), ("b".into(), 1)].iter().cloned().collect(); let wordlevel = WordLevelBuilder::default().vocab(vocab).build().unwrap(); let tokens = wordlevel.tokenize("a").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "a".into(), (0, 1)),]); let error = wordlevel.tokenize("c").err().unwrap(); assert!(error.is::<Error>()); } }
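// A short sketch of the id <-> token round trip kept in sync through `vocab` and
// `vocab_r`, using the same style of toy vocabulary as the tests above.
#[cfg(test)]
mod roundtrip_sketch {
    use super::*;
    use crate::tokenizer::Model;

    #[test]
    fn token_id_roundtrip() {
        let vocab: Vocab = [("<unk>".into(), 0), ("hello".into(), 1)]
            .iter()
            .cloned()
            .collect();
        let wordlevel = WordLevelBuilder::default().vocab(vocab).build().unwrap();

        assert_eq!(wordlevel.token_to_id("hello"), Some(1));
        assert_eq!(wordlevel.id_to_token(1), Some("hello".to_string()));
        assert_eq!(wordlevel.get_vocab_size(), 2);
        assert_eq!(wordlevel.token_to_id("missing"), None);
    }
}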
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/wordlevel/serialization.rs
use super::{super::OrderedVocabIter, WordLevel, WordLevelBuilder}; use serde::{ de::{MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use std::collections::HashSet; impl Serialize for WordLevel { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("WordLevel", 3)?; let ordered_vocab = OrderedVocabIter::new(&self.vocab_r); model.serialize_field("type", "WordLevel")?; model.serialize_field("vocab", &ordered_vocab)?; model.serialize_field("unk_token", &self.unk_token)?; model.end() } } impl<'de> Deserialize<'de> for WordLevel { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "WordLevel", &["type", "vocab", "unk_token"], WordLevelVisitor, ) } } struct WordLevelVisitor; impl<'de> Visitor<'de> for WordLevelVisitor { type Value = WordLevel; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct WordLevel") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut builder = WordLevelBuilder::new(); let mut missing_fields = vec![ // for retrocompatibility the "type" field is not mandatory "unk_token", "vocab", ] .into_iter() .collect::<HashSet<_>>(); while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "vocab" => builder = builder.vocab(map.next_value()?), "unk_token" => builder = builder.unk_token(map.next_value()?), "type" => match map.next_value()? { "WordLevel" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"WordLevel", )) } }, _ => {} } missing_fields.remove::<str>(&key); } if !missing_fields.is_empty() { Err(serde::de::Error::missing_field( missing_fields.iter().next().unwrap(), )) } else { Ok(builder.build().map_err(serde::de::Error::custom)?) } } } #[cfg(test)] mod tests { use crate::models::wordlevel::{Vocab, WordLevel, WordLevelBuilder}; #[test] fn serde() { let wl = WordLevel::default(); let wl_s = r#"{"type":"WordLevel","vocab":{},"unk_token":"<unk>"}"#; assert_eq!(serde_json::to_string(&wl).unwrap(), wl_s); assert_eq!(serde_json::from_str::<WordLevel>(wl_s).unwrap(), wl); } #[test] fn incomplete_vocab() { let vocab: Vocab = [("<unk>".into(), 0), ("b".into(), 2)] .iter() .cloned() .collect(); let wordlevel = WordLevelBuilder::default() .vocab(vocab) .unk_token("<unk>".to_string()) .build() .unwrap(); let wl_s = r#"{"type":"WordLevel","vocab":{"<unk>":0,"b":2},"unk_token":"<unk>"}"#; assert_eq!(serde_json::to_string(&wordlevel).unwrap(), wl_s); assert_eq!(serde_json::from_str::<WordLevel>(wl_s).unwrap(), wordlevel); } #[test] fn deserialization_should_fail() { let missing_unk = r#"{"type":"WordLevel","vocab":{}}"#; assert!(serde_json::from_str::<WordLevel>(missing_unk) .unwrap_err() .to_string() .starts_with("missing field `unk_token`")); let wrong_type = r#"{"type":"WordPiece","vocab":{}}"#; assert!(serde_json::from_str::<WordLevel>(wrong_type) .unwrap_err() .to_string() .starts_with("invalid value: string \"WordPiece\", expected WordLevel")); } }
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/wordlevel/trainer.rs
use super::WordLevel; use crate::utils::parallelism::*; use crate::{AddedToken, Result, Trainer}; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; use std::collections::HashMap; #[non_exhaustive] #[derive(Debug, Clone, Builder, Serialize, Deserialize)] pub struct WordLevelTrainer { /// The minimum frequency a word must have to be part of the vocabulary #[builder(default = "0")] pub min_frequency: u32, /// The target vocabulary size #[builder(default = "30_000")] pub vocab_size: usize, /// Whether to show progress while training #[builder(default = "true")] pub show_progress: bool, /// A list of special tokens that the model should know of #[builder(default)] pub special_tokens: Vec<AddedToken>, #[builder(default, private)] words: HashMap<String, u32>, } impl Default for WordLevelTrainer { fn default() -> Self { Self::builder().build().unwrap() } } impl WordLevelTrainer { pub fn builder() -> WordLevelTrainerBuilder { WordLevelTrainerBuilder::default() } fn do_train( &self, word_counts: &HashMap<String, u32>, model: &mut WordLevel, ) -> Result<Vec<AddedToken>> { let mut ordered_counts = word_counts.iter().collect::<Vec<_>>(); //sort the word counts first by inverse counts and then by word, in order //to keep the sorting deterministic in case of equal counts let cmp = |l: &(&String, &u32), r: &(&String, &u32)| -> Ordering { let count_comp: Ordering = l.1.cmp(r.1); if count_comp != Ordering::Equal { return count_comp.reverse(); } l.0.cmp(r.0) }; ordered_counts.sort_by(cmp); let word_level = WordLevel::builder() .vocab( self.special_tokens .iter() .map(|token| token.content.clone()) .chain( ordered_counts .into_iter() .filter(|(_, n)| **n >= self.min_frequency) .map(|(w, _)| w.to_owned()), ) .take(self.vocab_size) .enumerate() .map(|(i, w)| (w, i as u32)) .collect(), ) .build()?; // Transfer the vocab model.vocab = word_level.vocab; model.vocab_r = word_level.vocab_r; Ok(self.special_tokens.clone()) } } impl Trainer for WordLevelTrainer { type Model = WordLevel; /// Train a WordLevel model fn train(&self, model: &mut WordLevel) -> Result<Vec<AddedToken>> { self.do_train(&self.words, model) } /// Whether we should show progress fn should_show_progress(&self) -> bool { self.show_progress } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { let words: Result<HashMap<String, u32>> = iterator .maybe_par_bridge() .map(|sequence| { let words = process(sequence.as_ref())?; let mut map = HashMap::new(); for word in words { map.entry(word).and_modify(|c| *c += 1).or_insert(1); } Ok(map) }) .reduce( || Ok(HashMap::new()), |acc, ws| { let mut acc = acc?; for (k, v) in ws? 
{ acc.entry(k).and_modify(|c| *c += v).or_insert(v); } Ok(acc) }, ); self.words = words?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_train() { let word_counts: HashMap<String, u32> = [ ("the".into(), 25), ("roses".into(), 22), ("are".into(), 24), ("red".into(), 12), ("voilets".into(), 10), ("blue".into(), 16), ] .iter() .cloned() .collect(); let mut trainer = WordLevelTrainer { vocab_size: 5, ..Default::default() }; let mut model = WordLevel::default(); trainer.do_train(&word_counts, &mut model).unwrap(); let expected_vocab: HashMap<String, u32> = [ ("the".into(), 0), ("are".into(), 1), ("roses".into(), 2), ("blue".into(), 3), ("red".into(), 4), ] .iter() .cloned() .collect(); assert_eq!(model.vocab, expected_vocab); // If we specify a min_frequency trainer.min_frequency = 15; let mut model = WordLevel::default(); trainer.do_train(&word_counts, &mut model).unwrap(); let expected_vocab: HashMap<String, u32> = [ ("the".into(), 0), ("are".into(), 1), ("roses".into(), 2), ("blue".into(), 3), ] .iter() .cloned() .collect(); assert_eq!(model.vocab, expected_vocab); } }
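// A sketch of the `feed` + `train` flow (the tests above call `do_train` directly):
// sequences are pre-split with a simple whitespace closure, and only the
// `vocab_size` most frequent words are kept, most frequent first. The corpus is made
// up for the example.
#[cfg(test)]
mod feed_train_sketch {
    use super::*;
    use crate::Trainer;

    #[test]
    fn feed_then_train() {
        let mut trainer = WordLevelTrainer {
            vocab_size: 3,
            show_progress: false,
            ..Default::default()
        };

        trainer
            .feed(["hello world hello", "hello there"].iter(), |s| {
                Ok(s.split_whitespace().map(|w| w.to_string()).collect::<Vec<_>>())
            })
            .unwrap();

        let mut model = WordLevel::default();
        trainer.train(&mut model).unwrap();

        // "hello" appears three times, so it gets the lowest id; only three words are kept.
        assert_eq!(model.vocab.get("hello"), Some(&0));
        assert_eq!(model.vocab.len(), 3);
    }
}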
0
hf_public_repos/tokenizers/tokenizers/src/models
hf_public_repos/tokenizers/tokenizers/src/models/unigram/lattice.rs
use rand::distributions::WeightedIndex; use rand::prelude::*; use std::cell::RefCell; use std::cmp::{min, Ordering}; use std::collections::BinaryHeap; use std::rc::Rc; type NodeRef = Rc<RefCell<Node>>; type HypothesisRef = Rc<RefCell<Hypothesis>>; type Agenda = BinaryHeap<Hypothesis>; struct Hypothesis { node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64, } impl Hypothesis { pub fn new(node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64) -> Self { Self { node_ref, next, fx, gx, } } } impl PartialEq for Hypothesis { fn eq(&self, other: &Self) -> bool { self.fx == other.fx } } impl Eq for Hypothesis {} impl PartialOrd for Hypothesis { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } // TODO Maybe use Ordered Floats (https://docs.rs/ordered-float/1.0.2/ordered_float/) impl Ord for Hypothesis { fn cmp(&self, other: &Self) -> Ordering { if self.fx < other.fx { Ordering::Less } else { Ordering::Greater } } } /// Structure to implement Viterbi algorithm to find the best encoding, or sample /// from all possible encodings of a given sentence. #[derive(Debug)] pub struct Lattice<'a> { pub(super) sentence: &'a str, len: usize, nodes: Vec<NodeRef>, pub(super) begin_nodes: Vec<Vec<NodeRef>>, pub(super) end_nodes: Vec<Vec<NodeRef>>, _bos_id: usize, _eos_id: usize, } impl std::fmt::Display for Lattice<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let display_pieces = |nodes: &Vec<Vec<NodeRef>>| { nodes .iter() .map(|l| { l.iter() .map(|n| self.piece(&n.borrow())) .collect::<Vec<_>>() }) .collect::<Vec<_>>() }; f.debug_struct("Lattice") .field("sentence", &self.sentence) .field("begin_nodes", &display_pieces(&self.begin_nodes)) .field("end_nodes", &display_pieces(&self.end_nodes)) .finish() } } /// A node from the lattice, that helps reconstruct the underlying `String` #[derive(Debug, Clone)] pub struct Node { // Vocabulary id pub(super) id: usize, // Local lattice identifier pub(super) node_id: usize, pos: usize, length: usize, prev: Option<NodeRef>, backtrace_score: f64, score: f64, } impl PartialEq for Node { fn eq(&self, other: &Node) -> bool { self.id == other.id } } impl Node { pub fn new(id: usize, node_id: usize, pos: usize, length: usize, score: f64) -> Self { Self { id, node_id, pos, length, prev: None, score, backtrace_score: 0.0, } } } /// Returns log(exp(x) + exp(y)). /// if init_mode is true, returns log(exp(y)) == y. 
/// log(\sum_i exp(a[i])) can be computed as /// for (int i = 0; i < a.size(); ++i) /// x = LogSumExp(x, a[i], i == 0); fn log_sum_exp(x: f64, y: f64, init_mode: bool) -> f64 { if init_mode { y } else { let (vmin, vmax) = if x > y { (y, x) } else { (x, y) }; let k_minus_log_epsilon = 50.0; if vmax > vmin + k_minus_log_epsilon { vmax } else { vmax + ((vmin - vmax).exp() + 1.0).ln() } } } impl<'a> Lattice<'a> { pub fn from(sentence: &'a str, bos_id: usize, eos_id: usize) -> Self { let len = sentence.len(); let k_reserved_node_size = 16; // We are adding 2 tokens, bos and eos let mut nodes: Vec<NodeRef> = Vec::with_capacity(k_reserved_node_size); let mut begin_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let mut end_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let bos = Rc::new(RefCell::new(Node::new(bos_id, 0, 0, 0, 0.0))); let eos = Rc::new(RefCell::new(Node::new(eos_id, 1, len, 0, 0.0))); begin_nodes[len].push(Rc::clone(&eos)); end_nodes[0].push(Rc::clone(&bos)); nodes.push(bos); nodes.push(eos); Self { sentence, len, nodes, begin_nodes, end_nodes, _bos_id: bos_id, _eos_id: eos_id, } } pub fn insert(&mut self, pos: usize, length: usize, score: f64, id: usize) { let node_id = self.nodes.len(); let node = Rc::new(RefCell::new(Node::new(id, node_id, pos, length, score))); self.begin_nodes[pos].push(Rc::clone(&node)); self.end_nodes[pos + length].push(Rc::clone(&node)); self.nodes.push(node); } pub fn viterbi(&mut self) -> Vec<NodeRef> { let len = self.len; let mut pos = 0; while pos <= len { if self.begin_nodes[pos].is_empty() { return vec![]; } for rnode in &self.begin_nodes[pos] { rnode.borrow_mut().prev = None; let mut best_score = 0.0; let mut best_node: Option<NodeRef> = None; for lnode in &self.end_nodes[pos] { let score = lnode.borrow().backtrace_score + rnode.borrow().score; if best_node.is_none() || score > best_score { // TODO can we remove this clone ? 
best_node = Some(lnode.clone()); best_score = score } } match best_node { Some(bnode) => { rnode.borrow_mut().prev = Some(Rc::clone(&bnode)); rnode.borrow_mut().backtrace_score = best_score; } None => return vec![], } } if let Some(c) = self.sentence[pos..].chars().next() { pos += c.len_utf8(); } else { break; } } let mut results: Vec<NodeRef> = vec![]; let root = self.begin_nodes[len][0].borrow(); let prev = root.prev.as_ref(); if prev.is_none() { return vec![]; } let mut node: NodeRef = prev.unwrap().clone(); while node.borrow().prev.is_some() { results.push(node.clone()); let n = node.borrow().clone(); node = n.prev.as_ref().unwrap().clone(); } results.reverse(); results } pub fn piece(&self, node: &Node) -> String { self.sentence[node.pos..node.pos + node.length].to_owned() } pub fn tokens(&mut self) -> Vec<String> { self.viterbi() .iter() .map(|node| self.piece(&node.borrow())) .collect() } pub fn nbest(&mut self, n: usize) -> Vec<Vec<NodeRef>> { match n { 0 => vec![], 1 => vec![self.viterbi()], _ => { // let k_reserved_hypothesis_size = 512; let mut agenda: Agenda = BinaryHeap::new(); let mut hypotheses: Vec<Vec<NodeRef>> = vec![]; let eos = self.eos_node(); let score = eos.borrow().score; let hypo = Hypothesis::new(eos, None, score, score); agenda.push(hypo); // Fill backtrace scores self.viterbi(); while !agenda.is_empty() { let top = Rc::new(RefCell::new(agenda.pop().unwrap())); let node = Rc::clone(&top.borrow().node_ref); if node.borrow().id == self.bos_node().borrow().id { let mut hypothesis = vec![]; let mut next: HypothesisRef = Rc::clone(top.borrow().next.as_ref().unwrap()); while next.borrow().next.is_some() { hypothesis.push(next.borrow().node_ref.clone()); let c: HypothesisRef = next.clone(); // let c: Ref<Hypothesis> = next.clone().borrow(); next = Rc::clone(c.borrow().next.as_ref().unwrap()); } hypotheses.push(hypothesis); if hypotheses.len() == n { return hypotheses; } } else { for lnode in &self.end_nodes[node.borrow().pos] { let top_gx = top.borrow().gx; let fx = lnode.borrow().backtrace_score + top_gx; let gx = lnode.borrow().score + top_gx; let hyp = Hypothesis::new(Rc::clone(lnode), Some(Rc::clone(&top)), fx, gx); agenda.push(hyp); } // When the input is too long or contains duplicated phrases, // `agenda` will get extremely big. Here we avoid this case by // dynamically shrinking the agenda. 
let k_max_agenda_size = 100_000; let k_min_agenda_size = 512; if agenda.len() > k_max_agenda_size { let mut new_agenda = BinaryHeap::new(); let len = min(k_min_agenda_size, n * 10); for _i in 0..len { new_agenda.push(agenda.pop().unwrap()); } agenda = new_agenda; } } } hypotheses } } } pub fn nbest_tokens(&mut self, n: usize) -> Vec<Vec<String>> { self.nbest(n) .iter() .map(|v| v.iter().map(|node| self.piece(&node.borrow())).collect()) .collect() } pub fn len(&self) -> usize { self.len } pub fn is_empty(&self) -> bool { self.len == 0 } pub fn bos_node(&self) -> NodeRef { Rc::clone(&self.end_nodes[0][0]) } pub fn eos_node(&self) -> NodeRef { Rc::clone(&self.begin_nodes[self.len][0]) } pub fn surface(&self, n: usize) -> &str { match self.sentence.char_indices().nth(n) { Some((pos, _)) => &self.sentence[pos..], None => "", } } pub fn sentence(&self) -> &str { self.sentence } pub fn populate_marginal(&self, freq: f64, expected: &mut [f64]) -> f64 { let len = self.len(); let n_nodes = self.nodes.len(); let mut alpha = vec![0.0; n_nodes]; let mut beta = vec![0.0; n_nodes]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], lnode.borrow().score + alpha[lid], *lnode == self.end_nodes[pos][0], ); } } } for pos in (0..=len).rev() { // let rpos = len - pos; for lnode in &self.end_nodes[pos] { for rnode in &self.begin_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; beta[lid] = log_sum_exp( beta[lid], rnode.borrow().score + beta[rid], *rnode == self.begin_nodes[pos][0], ); } } } let eos_id = self.begin_nodes[len][0].borrow().node_id; let z = alpha[eos_id]; for pos in 0..len { for node in &self.begin_nodes[pos] { let node_id = node.borrow().node_id; let id = node.borrow().id; let a = alpha[node_id]; let b = beta[node_id]; let total = a + node.borrow().score + b - z; let update = freq * total.exp(); expected[id] += update; } } freq * z } pub fn sample(&self, theta: f64) -> Vec<NodeRef> { let len = self.len(); if len == 0 { return vec![]; } let mut alpha = vec![0.0; self.nodes.len()]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], theta * (lnode.borrow().score + alpha[lid]), *lnode == self.end_nodes[pos][0], ); } } } let mut rng = thread_rng(); let mut results: Vec<NodeRef> = vec![]; let mut probs: Vec<f64> = vec![]; let mut z = alpha[self.eos_node().borrow().node_id]; let mut node = self.eos_node(); loop { probs.clear(); let pos = node.borrow().pos; for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; probs.push((alpha[lid] + theta * lnode.borrow().score - z).exp()) } let dist = WeightedIndex::new(&probs).unwrap(); let index = dist.sample(&mut rng); node = Rc::clone(&self.end_nodes[pos][index]); if node == self.bos_node() { break; } z = alpha[node.borrow().node_id]; results.push(Rc::clone(&node)); } results.reverse(); results } pub fn sample_token(&self, theta: f64) -> Vec<String> { self.sample(theta) .iter() .map(|node| self.piece(&node.borrow())) .collect() } } #[cfg(test)] mod tests { use super::*; use assert_approx_eq::assert_approx_eq; #[test] fn set_sentence() { let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); assert_eq!(lattice.sentence(), ""); 
assert_eq!(lattice.surface(0), ""); let lattice = Lattice::from("test", 1, 2); assert_eq!(lattice.len(), 4); assert_eq!(lattice.sentence(), "test"); assert_eq!(lattice.surface(0), "test"); assert_eq!(lattice.surface(1), "est"); assert_eq!(lattice.surface(2), "st"); assert_eq!(lattice.surface(3), "t"); let bos = lattice.bos_node(); let eos = lattice.eos_node(); assert_eq!(bos.borrow().id, 1); assert_eq!(eos.borrow().id, 2); assert_eq!( lattice.end_nodes[0].first().unwrap().borrow().id, bos.borrow().id ); assert_eq!( lattice.begin_nodes[4].first().unwrap().borrow().id, eos.borrow().id ); let lattice = Lattice::from("テストab", 1, 2); assert_eq!(lattice.len(), 11); assert_eq!(lattice.sentence(), "テストab"); assert_eq!(lattice.surface(0), "テストab"); assert_eq!(lattice.surface(1), "ストab"); assert_eq!(lattice.surface(2), "トab"); assert_eq!(lattice.surface(3), "ab"); assert_eq!(lattice.surface(4), "b"); } #[test] fn insert_test() { let mut lattice = Lattice::from("ABあい", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 3, 0.0, 5); lattice.insert(5, 3, 0.0, 6); lattice.insert(0, 2, 0.0, 7); lattice.insert(1, 4, 0.0, 8); lattice.insert(2, 6, 0.0, 9); // 0 & 1 are bos and eos let node0 = lattice.nodes[2].borrow(); let node1 = lattice.nodes[3].borrow(); let node2 = lattice.nodes[4].borrow(); let node3 = lattice.nodes[5].borrow(); let node4 = lattice.nodes[6].borrow(); let node5 = lattice.nodes[7].borrow(); let node6 = lattice.nodes[8].borrow(); assert_eq!(lattice.piece(&node0), "A"); assert_eq!(lattice.piece(&node1), "B"); assert_eq!(lattice.piece(&node2), "あ"); assert_eq!(lattice.piece(&node3), "い"); assert_eq!(lattice.piece(&node4), "AB"); assert_eq!(lattice.piece(&node5), "Bあ"); assert_eq!(lattice.piece(&node6), "あい"); assert_eq!(node0.pos, 0); assert_eq!(node1.pos, 1); assert_eq!(node2.pos, 2); assert_eq!(node3.pos, 5); assert_eq!(node4.pos, 0); assert_eq!(node5.pos, 1); assert_eq!(node6.pos, 2); assert_eq!(node0.length, 1); assert_eq!(node1.length, 1); assert_eq!(node2.length, 3); assert_eq!(node3.length, 3); assert_eq!(node4.length, 2); assert_eq!(node5.length, 4); assert_eq!(node6.length, 6); assert_eq!(lattice.bos_node().borrow().id, 1); assert_eq!(lattice.eos_node().borrow().id, 2); assert_eq!(node0.id, 3); assert_eq!(node1.id, 4); assert_eq!(node2.id, 5); assert_eq!(node3.id, 6); assert_eq!(node4.id, 7); assert_eq!(node5.id, 8); assert_eq!(node6.id, 9); assert_eq!(lattice.begin_nodes[0].len(), 2); assert_eq!(lattice.begin_nodes[1].len(), 2); assert_eq!(lattice.begin_nodes[2].len(), 2); assert_eq!(lattice.begin_nodes[5].len(), 1); assert_eq!(lattice.begin_nodes[8].len(), 1); assert_eq!(lattice.end_nodes[0].len(), 1); assert_eq!(lattice.end_nodes[1].len(), 1); assert_eq!(lattice.end_nodes[2].len(), 2); assert_eq!(lattice.end_nodes[5].len(), 2); assert_eq!(lattice.end_nodes[8].len(), 2); assert_eq!(lattice.begin_nodes[0][0].borrow().id, node0.id); assert_eq!(lattice.begin_nodes[0][1].borrow().id, node4.id); assert_eq!(lattice.begin_nodes[1][0].borrow().id, node1.id); assert_eq!(lattice.begin_nodes[1][1].borrow().id, node5.id); assert_eq!(lattice.begin_nodes[2][0].borrow().id, node2.id); assert_eq!(lattice.begin_nodes[2][1].borrow().id, node6.id); assert_eq!(lattice.begin_nodes[5][0].borrow().id, node3.id); assert_eq!( lattice.eos_node().borrow().id, lattice.begin_nodes[8][0].borrow().id ); assert_eq!( lattice.bos_node().borrow().id, lattice.end_nodes[0][0].borrow().id ); assert_eq!(node0.id, lattice.end_nodes[1][0].borrow().id); assert_eq!(node1.id, 
lattice.end_nodes[2][0].borrow().id); assert_eq!(node4.id, lattice.end_nodes[2][1].borrow().id); assert_eq!(node2.id, lattice.end_nodes[5][0].borrow().id); assert_eq!(node5.id, lattice.end_nodes[5][1].borrow().id); assert_eq!(node3.id, lattice.end_nodes[8][0].borrow().id); assert_eq!(node6.id, lattice.end_nodes[8][1].borrow().id); } #[test] fn test_viterbi() { let mut lattice = Lattice::from("ABC", 1, 2); assert_eq!(lattice.viterbi(), vec![]); // Still incomplete lattice.insert(0, 1, 0.0, 3); assert_eq!(lattice.viterbi(), vec![]); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); // XXX: In sentence piece this is not tested, still incomplete ? assert_eq!(lattice.viterbi().len(), 3); } #[test] fn test_viterbi2() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); assert_eq!(lattice.tokens(), ["A", "B", "C"]); lattice.insert(0, 2, 2.0, 6); assert_eq!(lattice.tokens(), ["AB", "C"]); lattice.insert(1, 2, 5.0, 7); assert_eq!(lattice.tokens(), ["A", "BC"]); lattice.insert(0, 3, 10.0, 8); assert_eq!(lattice.tokens(), ["ABC"]); } #[test] fn test_nbest() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); lattice.insert(0, 2, 2.0, 6); lattice.insert(1, 2, 5.0, 7); lattice.insert(0, 3, 10.0, 8); let nbests = lattice.nbest_tokens(10); assert_eq!( nbests, vec![ vec!["ABC"], vec!["A", "BC"], vec!["AB", "C"], vec!["A", "B", "C"] ] ); assert!(lattice.nbest_tokens(0).is_empty()); assert_eq!(lattice.nbest_tokens(1), vec![vec!["ABC"]]); } #[test] fn test_log_sum_exp() { let mut x = 0.0; let v: Vec<f64> = vec![1.0, 2.0, 3.0]; for (i, y) in v.iter().enumerate() { x = log_sum_exp(x, *y, i == 0); } assert_approx_eq!(x, v.iter().map(|n| n.exp()).sum::<f64>().ln(), 0.001); } #[test] fn test_populate() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 1.0, 3); // A lattice.insert(1, 1, 1.2, 4); // B lattice.insert(2, 1, 2.5, 5); // C lattice.insert(0, 2, 3.0, 6); // AB lattice.insert(1, 2, 4.0, 7); // BC lattice.insert(0, 3, 2.0, 8); // ABC let mut probs = vec![0.0; 9]; let p1 = (1.0_f64 + 1.2 + 2.5).exp(); let p2 = (3.0_f64 + 2.5).exp(); let p3 = (1.0_f64 + 4.0).exp(); let p4 = 2.0_f64.exp(); let z = p1 + p2 + p3 + p4; let log_z = lattice.populate_marginal(1.0, &mut probs); assert_approx_eq!(log_z, z.ln(), 0.001); assert_approx_eq!(probs[0], 0.0, 0.001); assert_approx_eq!(probs[1], 0.0, 0.001); assert_approx_eq!(probs[2], 0.0, 0.001); assert_approx_eq!(probs[3], (p1 + p3) / z, 0.001); assert_approx_eq!(probs[4], (p1) / z, 0.001); assert_approx_eq!(probs[5], (p1 + p2) / z, 0.001); assert_approx_eq!(probs[6], (p2) / z, 0.001); assert_approx_eq!(probs[7], (p3) / z, 0.001); assert_approx_eq!(probs[8], (p4) / z, 0.001); } }
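// A sketch of `sample_token`, which is not covered by the tests above: whichever
// path is drawn from the lattice, the sampled pieces always concatenate back to the
// original sentence. The scores and ids are made up for the example.
#[cfg(test)]
mod sample_sketch {
    use super::*;

    #[test]
    fn sampled_tokens_cover_sentence() {
        let mut lattice = Lattice::from("ABC", 1, 2);
        lattice.insert(0, 1, 0.0, 3); // A
        lattice.insert(1, 1, 0.0, 4); // B
        lattice.insert(2, 1, 0.0, 5); // C
        lattice.insert(0, 2, 1.0, 6); // AB
        lattice.insert(1, 2, 1.0, 7); // BC

        // Sampling is random, so only the coverage invariant is asserted.
        for _ in 0..10 {
            let tokens = lattice.sample_token(1.0);
            assert_eq!(tokens.concat(), "ABC");
        }
    }
}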
0