hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.py
# Generated content DO NOT EDIT
from .. import trainers

Trainer = trainers.Trainer
BpeTrainer = trainers.BpeTrainer
UnigramTrainer = trainers.UnigramTrainer
WordLevelTrainer = trainers.WordLevelTrainer
WordPieceTrainer = trainers.WordPieceTrainer
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi
# Generated content DO NOT EDIT
class Trainer:
    """
    Base class for all trainers

    This class is not supposed to be instantiated directly. Instead, any implementation of a
    Trainer will return an instance of this class when instantiated.
    """

class BpeTrainer(Trainer):
    """
    Trainer capable of training a BPE model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.
        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.
        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.
        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.
        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.
        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even if not seen in the
            training dataset. If the strings contain more than one character, only the first
            one is kept.
        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.
        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.
        max_token_length (:obj:`int`, `optional`):
            Prevents creating tokens longer than the specified size. This can help avoid
            polluting your vocabulary with highly repetitive tokens like `======` from
            Wikipedia dumps.
    """

class UnigramTrainer(Trainer):
    """
    Trainer capable of training a Unigram model

    Args:
        vocab_size (:obj:`int`):
            The size of the final vocabulary, including all tokens and alphabet.
        show_progress (:obj:`bool`):
            Whether to show progress bars while training.
        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.
        initial_alphabet (:obj:`List[str]`):
            A list of characters to include in the initial alphabet, even if not seen in the
            training dataset. If the strings contain more than one character, only the first
            one is kept.
        shrinking_factor (:obj:`float`):
            The shrinking factor used at each step of the training to prune the vocabulary.
        unk_token (:obj:`str`):
            The token used for out-of-vocabulary tokens.
        max_piece_length (:obj:`int`):
            The maximum length of a given token.
        n_sub_iterations (:obj:`int`):
            The number of iterations of the EM algorithm to perform before pruning the
            vocabulary.
    """
    def __init__(
        self,
        vocab_size=8000,
        show_progress=True,
        special_tokens=[],
        shrinking_factor=0.75,
        unk_token=None,
        max_piece_length=16,
        n_sub_iterations=2,
    ):
        pass

class WordLevelTrainer(Trainer):
    """
    Trainer capable of training a WordLevel model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.
        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.
        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.
        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.
    """

class WordPieceTrainer(Trainer):
    """
    Trainer capable of training a WordPiece model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.
        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.
        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.
        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.
        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.
        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even if not seen in the
            training dataset. If the strings contain more than one character, only the first
            one is kept.
        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.
        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.
    """
    def __init__(
        self,
        vocab_size=30000,
        min_frequency=0,
        show_progress=True,
        special_tokens=[],
        limit_alphabet=None,
        initial_alphabet=[],
        continuing_subword_prefix="##",
        end_of_word_suffix=None,
    ):
        pass
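The following usage sketch is not part of the repository file above; it shows how these trainer stubs are typically exercised, assuming a plain-text training file ("corpus.txt" is a placeholder path):

    from tokenizers import Tokenizer
    from tokenizers.models import BPE
    from tokenizers.pre_tokenizers import Whitespace
    from tokenizers.trainers import BpeTrainer

    # Build an untrained BPE tokenizer, then learn merges from raw text files.
    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
    tokenizer.pre_tokenizer = Whitespace()
    trainer = BpeTrainer(vocab_size=30000, min_frequency=2, special_tokens=["[UNK]", "[PAD]"])
    tokenizer.train(["corpus.txt"], trainer=trainer)  # "corpus.txt" is a hypothetical input file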
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.py
# Generated content DO NOT EDIT
from .. import models

Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi
# Generated content DO NOT EDIT
class Model:
    """
    Base class for all models

    The model represents the actual tokenization algorithm. This is the part that
    will contain and manage the learned vocabulary.

    This class cannot be constructed directly. Please use one of the concrete models.
    """
    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass
    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass
    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created. Any file with the same name that already exists in this
        folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files
            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass
    def token_to_id(self, token):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass
    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass

class BPE(Model):
    """
    An implementation of the BPE (Byte-Pair Encoding) algorithm

    Args:
        vocab (:obj:`Dict[str, int]`, `optional`):
            A dictionary of string keys and their ids :obj:`{"am": 0,...}`
        merges (:obj:`List[Tuple[str, str]]`, `optional`):
            A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
        cache_capacity (:obj:`int`, `optional`):
            The number of words that the BPE cache can contain. The cache speeds up the
            process by keeping the result of the merge operations for a number of words.
        dropout (:obj:`float`, `optional`):
            A float between 0 and 1 that represents the BPE dropout to use.
        unk_token (:obj:`str`, `optional`):
            The unknown token to be used by the model.
        continuing_subword_prefix (:obj:`str`, `optional`):
            The prefix to attach to subword units that don't represent a beginning of word.
        end_of_word_suffix (:obj:`str`, `optional`):
            The suffix to attach to subword units that represent an end of word.
        fuse_unk (:obj:`bool`, `optional`):
            Whether to fuse any subsequent unknown tokens into a single one
        byte_fallback (:obj:`bool`, `optional`):
            Whether to use spm byte-fallback trick (defaults to False)
    """
    def __init__(
        self,
        vocab=None,
        merges=None,
        cache_capacity=None,
        dropout=None,
        unk_token=None,
        continuing_subword_prefix=None,
        end_of_word_suffix=None,
        fuse_unk=None,
        byte_fallback=False,
    ):
        pass
    @staticmethod
    def from_file(vocab, merges, **kwargs):
        """
        Instantiate a BPE model from the given files.

        This method is roughly equivalent to doing::

            vocab, merges = BPE.read_file(vocab_filename, merges_filename)
            bpe = BPE(vocab, merges)

        If you don't need to keep the :obj:`vocab, merges` values lying around, this method
        is more optimized than manually calling :meth:`~tokenizers.models.BPE.read_file` to
        initialize a :class:`~tokenizers.models.BPE`

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file
            merges (:obj:`str`):
                The path to a :obj:`merges.txt` file

        Returns:
            :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
        """
        pass
    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass
    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass
    @staticmethod
    def read_file(vocab, merges):
        """
        Read a :obj:`vocab.json` and a :obj:`merges.txt` files

        This method provides a way to read and parse the content of these files,
        returning the relevant data structures. If you want to instantiate some BPE models
        from memory, this method gives you the expected input from the standard files.

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file
            merges (:obj:`str`):
                The path to a :obj:`merges.txt` file

        Returns:
            A :obj:`Tuple` with the vocab and the merges:
                The vocabulary and merges loaded into memory
        """
        pass
    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created. Any file with the same name that already exists in this
        folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files
            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass
    def token_to_id(self, token):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass
    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass

class Unigram(Model):
    """
    An implementation of the Unigram algorithm

    Args:
        vocab (:obj:`List[Tuple[str, float]]`, `optional`):
            A list of vocabulary items and their relative score [("am", -0.2442),...]
    """
    def __init__(self, vocab, unk_id, byte_fallback):
        pass
    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass
    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass
    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created. Any file with the same name that already exists in this
        folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files
            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass
    def token_to_id(self, token):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass
    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass

class WordLevel(Model):
    """
    An implementation of the WordLevel algorithm

    The simplest tokenizer model, based on mapping tokens to their corresponding id.

    Args:
        vocab (:obj:`Dict[str, int]`, `optional`):
            A dictionary of string keys and their ids :obj:`{"am": 0,...}`
        unk_token (:obj:`str`, `optional`):
            The unknown token to be used by the model.
    """
    def __init__(self, vocab, unk_token):
        pass
    @staticmethod
    def from_file(vocab, unk_token):
        """
        Instantiate a WordLevel model from the given file

        This method is roughly equivalent to doing::

            vocab = WordLevel.read_file(vocab_filename)
            wordlevel = WordLevel(vocab)

        If you don't need to keep the :obj:`vocab` values lying around, this method is more
        optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
        initialize a :class:`~tokenizers.models.WordLevel`

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file

        Returns:
            :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
        """
        pass
    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass
    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass
    @staticmethod
    def read_file(vocab):
        """
        Read a :obj:`vocab.json`

        This method provides a way to read and parse the content of a vocabulary file,
        returning the relevant data structures. If you want to instantiate some WordLevel
        models from memory, this method gives you the expected input from the standard files.

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.json` file

        Returns:
            :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
        """
        pass
    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created. Any file with the same name that already exists in this
        folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files
            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass
    def token_to_id(self, token):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass
    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass

class WordPiece(Model):
    """
    An implementation of the WordPiece algorithm

    Args:
        vocab (:obj:`Dict[str, int]`, `optional`):
            A dictionary of string keys and their ids :obj:`{"am": 0,...}`
        unk_token (:obj:`str`, `optional`):
            The unknown token to be used by the model.
        max_input_chars_per_word (:obj:`int`, `optional`):
            The maximum number of characters to allow in a single word.
    """
    def __init__(self, vocab, unk_token, max_input_chars_per_word):
        pass
    @staticmethod
    def from_file(vocab, **kwargs):
        """
        Instantiate a WordPiece model from the given file

        This method is roughly equivalent to doing::

            vocab = WordPiece.read_file(vocab_filename)
            wordpiece = WordPiece(vocab)

        If you don't need to keep the :obj:`vocab` values lying around, this method is more
        optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
        initialize a :class:`~tokenizers.models.WordPiece`

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.txt` file

        Returns:
            :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
        """
        pass
    def get_trainer(self):
        """
        Get the associated :class:`~tokenizers.trainers.Trainer`

        Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
        :class:`~tokenizers.models.Model`.

        Returns:
            :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
        """
        pass
    def id_to_token(self, id):
        """
        Get the token associated to an ID

        Args:
            id (:obj:`int`):
                An ID to convert to a token

        Returns:
            :obj:`str`: The token associated to the ID
        """
        pass
    @staticmethod
    def read_file(vocab):
        """
        Read a :obj:`vocab.txt` file

        This method provides a way to read and parse the content of a standard `vocab.txt`
        file as used by the WordPiece Model, returning the relevant data structures. If you
        want to instantiate some WordPiece models from memory, this method gives you the
        expected input from the standard files.

        Args:
            vocab (:obj:`str`):
                The path to a :obj:`vocab.txt` file

        Returns:
            :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
        """
        pass
    def save(self, folder, prefix):
        """
        Save the current model

        Save the current model in the given folder, using the given prefix for the various
        files that will get created. Any file with the same name that already exists in this
        folder will be overwritten.

        Args:
            folder (:obj:`str`):
                The path to the target folder in which to save the various files
            prefix (:obj:`str`, `optional`):
                An optional prefix, used to prefix each file name

        Returns:
            :obj:`List[str]`: The list of saved files
        """
        pass
    def token_to_id(self, token):
        """
        Get the ID associated to a token

        Args:
            token (:obj:`str`):
                A token to convert to an ID

        Returns:
            :obj:`int`: The ID associated to the token
        """
        pass
    def tokenize(self, sequence):
        """
        Tokenize a sequence

        Args:
            sequence (:obj:`str`):
                A sequence to tokenize

        Returns:
            A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
        """
        pass
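As a quick orientation (not part of the stub file above), here is a minimal sketch of driving the BPE model directly with a toy in-memory vocabulary; the vocab and merges below are made up for illustration:

    from tokenizers.models import BPE

    vocab = {"a": 0, "b": 1, "ab": 2}
    merges = [("a", "b")]
    bpe = BPE(vocab, merges)

    # "ab" is first split into characters, then the ("a", "b") merge applies.
    tokens = bpe.tokenize("ab")
    print([(t.id, t.value) for t in tokens])  # [(2, 'ab')]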
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py
from .. import decoders

Decoder = decoders.Decoder
ByteLevel = decoders.ByteLevel
Replace = decoders.Replace
WordPiece = decoders.WordPiece
ByteFallback = decoders.ByteFallback
Fuse = decoders.Fuse
Strip = decoders.Strip
Metaspace = decoders.Metaspace
BPEDecoder = decoders.BPEDecoder
CTC = decoders.CTC
Sequence = decoders.Sequence
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi
# Generated content DO NOT EDIT
class Decoder:
    """
    Base class for all decoders

    This class is not supposed to be instantiated directly. Instead, any implementation of
    a Decoder will return an instance of this class when instantiated.
    """
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class BPEDecoder(Decoder):
    """
    BPEDecoder Decoder

    Args:
        suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
            The suffix that was used to characterize an end-of-word. This suffix will
            be replaced by whitespaces during the decoding
    """
    def __init__(self, suffix="</w>"):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class ByteFallback(Decoder):
    """
    ByteFallback Decoder

    ByteFallback is a simple trick which converts tokens looking like `<0x61>` to pure
    bytes, and attempts to make them into a string. If the tokens cannot be decoded you
    will get � instead for each inconvertible byte token
    """
    def __init__(self):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class ByteLevel(Decoder):
    """
    ByteLevel Decoder

    This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
    :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
    """
    def __init__(self):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class CTC(Decoder):
    """
    CTC Decoder

    Args:
        pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
            The pad token used by CTC to delimit a new token.
        word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
            The word delimiter token. It will be replaced by a <space>
        cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
            and some abbreviated English forms.
    """
    def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class Fuse(Decoder):
    """
    Fuse Decoder

    Fuse simply fuses every token into a single string. This is the last step of decoding;
    this decoder exists only if there is a need to add other decoders *after* the fusion
    """
    def __init__(self):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class Metaspace(Decoder):
    """
    Metaspace Decoder

    Args:
        replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
            The replacement character. Must be exactly one character. By default we use the
            `▁` (U+2581) meta symbol (same as in SentencePiece).
        add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to add a space to the first word if there isn't already one. This lets
            us treat `hello` exactly like `say hello`.
    """
    def __init__(self, replacement="▁", add_prefix_space=True):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class Replace(Decoder):
    """
    Replace Decoder

    This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
    :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
    """
    def __init__(self, pattern, content):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class Sequence(Decoder):
    """
    Sequence Decoder

    Args:
        decoders (:obj:`List[Decoder]`):
            The decoders that need to be chained
    """
    def __init__(self, decoders):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class Strip(Decoder):
    """
    Strip Decoder

    Strips n left characters of each token, or n right characters of each token
    """
    def __init__(self, content, left=0, right=0):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass

class WordPiece(Decoder):
    """
    WordPiece Decoder

    Args:
        prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
            The prefix to use for subwords that are not a beginning-of-word
        cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
            and some abbreviated English forms.
    """
    def __init__(self, prefix="##", cleanup=True):
        pass
    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py
# Generated content DO NOT EDIT
from .. import pre_tokenizers

PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi
# Generated content DO NOT EDIT
class PreTokenizer:
    """
    Base class for all pre-tokenizers

    This class is not supposed to be instantiated directly. Instead, any implementation of a
    PreTokenizer will return an instance of this class when instantiated.
    """
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class BertPreTokenizer(PreTokenizer):
    """
    BertPreTokenizer

    This pre-tokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
    a punctuation character will be treated separately.
    """
    def __init__(self):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class ByteLevel(PreTokenizer):
    """
    ByteLevel PreTokenizer

    This pre-tokenizer takes care of replacing all bytes of the given string with a
    corresponding representation, as well as splitting into words.

    Args:
        add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to add a space to the first word if there isn't already one. This lets us
            treat `hello` exactly like `say hello`.
        use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Set this to :obj:`False` to prevent this `pre_tokenizer` from using the GPT2
            specific regexp for splitting on whitespace.
    """
    def __init__(self, add_prefix_space=True, use_regex=True):
        pass
    @staticmethod
    def alphabet():
        """
        Returns the alphabet used by this PreTokenizer.

        Since the ByteLevel works as its name suggests, at the byte level, it encodes each
        byte value to a unique visible character. This means that there is a total of 256
        different characters composing this alphabet.

        Returns:
            :obj:`List[str]`: A list of characters that compose the alphabet
        """
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class CharDelimiterSplit(PreTokenizer):
    """
    This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`

    Args:
        delimiter (:obj:`str`):
            The delimiter char that will be used to split input
    """
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class Digits(PreTokenizer):
    """
    This pre-tokenizer simply splits on digits, keeping them in separate tokens

    Args:
        individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If set to True, digits will each be separated as follows::

                "Call 123 please" -> "Call ", "1", "2", "3", " please"

            If set to False, digits will be grouped as follows::

                "Call 123 please" -> "Call ", "123", " please"
    """
    def __init__(self, individual_digits=False):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class Metaspace(PreTokenizer):
    """
    Metaspace pre-tokenizer

    This pre-tokenizer replaces any whitespace by the provided replacement character.
    It then tries to split on these spaces.

    Args:
        replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
            The replacement character. Must be exactly one character. By default we use the
            `▁` (U+2581) meta symbol (same as in SentencePiece).
        add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to add a space to the first word if there isn't already one. This lets us
            treat `hello` exactly like `say hello`.
    """
    def __init__(self, replacement="▁", add_prefix_space=True):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class Punctuation(PreTokenizer):
    """
    This pre-tokenizer simply splits on punctuation as individual characters.

    Args:
        behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
            The behavior to use when splitting. Choices: "removed", "isolated" (default),
            "merged_with_previous", "merged_with_next", "contiguous"
    """
    def __init__(self, behavior="isolated"):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class Sequence(PreTokenizer):
    """
    This pre-tokenizer composes other pre_tokenizers and applies them in sequence
    """
    def __init__(self, pretokenizers):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class Split(PreTokenizer):
    """
    Split PreTokenizer

    This versatile pre-tokenizer splits using the provided pattern and according to the
    provided behavior. The pattern can be inverted by making use of the invert flag.

    Args:
        pattern (:obj:`str` or :class:`~tokenizers.Regex`):
            A pattern used to split the string. Usually a string or a regex built with
            `tokenizers.Regex`
        behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
            The behavior to use when splitting. Choices: "removed", "isolated",
            "merged_with_previous", "merged_with_next", "contiguous"
        invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to invert the pattern.
    """
    def __init__(self, pattern, behavior, invert=False):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class UnicodeScripts(PreTokenizer):
    """
    This pre-tokenizer splits on characters that belong to different language families.
    It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
    Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
    This mimics the SentencePiece Unigram implementation.
    """
    def __init__(self):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class Whitespace(PreTokenizer):
    r"""
    This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
    """
    def __init__(self):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass

class WhitespaceSplit(PreTokenizer):
    """
    This pre-tokenizer simply splits on the whitespace. Works like `.split()`
    """
    def __init__(self):
        pass
    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows modifying a :class:`~tokenizers.PreTokenizedString` to keep track of the
        pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you just want to see the result of the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment,
        nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`.
        If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
        """
        pass
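To make the behavior above concrete (this sketch is not part of the stub file), `pre_tokenize_str` can be called on any raw string:

    from tokenizers.pre_tokenizers import Digits, Whitespace

    # Whitespace splits on the regex \w+|[^\w\s]+
    print(Whitespace().pre_tokenize_str("Hello, world!"))
    # [('Hello', (0, 5)), (',', (5, 6)), ('world', (7, 12)), ('!', (12, 13))]

    # Digits with individual_digits=True isolates every digit.
    print(Digits(individual_digits=True).pre_tokenize_str("Call 123"))
    # [('Call ', (0, 5)), ('1', (5, 6)), ('2', (6, 7)), ('3', (7, 8))]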
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union

from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from ..models import BPE
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer


class CharBPETokenizer(BaseTokenizer):
    """Original BPE Tokenizer

    Represents the BPE algorithm, as introduced by Rico Sennrich
    (https://arxiv.org/abs/1508.07909)

    The default settings correspond to OpenAI GPT BPE tokenizers and differ from the original
    Sennrich subword-nmt implementation by the following options that you can deactivate:
        - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
            * removing any control characters and replacing all whitespaces by the classic one.
            * handling Chinese chars by putting spaces around them.
            * stripping all accents.
        - splitting on punctuation in addition to whitespaces (deactivate it with
          `split_on_whitespace_only=True`)
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
        unk_token: Union[str, AddedToken] = "<unk>",
        suffix: str = "</w>",
        dropout: Optional[float] = None,
        lowercase: bool = False,
        unicode_normalizer: Optional[str] = None,
        bert_normalizer: bool = True,
        split_on_whitespace_only: bool = False,
    ):
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    unk_token=str(unk_token),
                    end_of_word_suffix=suffix,
                )
            )
        else:
            tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))

        if tokenizer.token_to_id(str(unk_token)) is not None:
            tokenizer.add_special_tokens([str(unk_token)])

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if bert_normalizer:
            normalizers += [BertNormalizer(lowercase=False)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        if split_on_whitespace_only:
            tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
        else:
            tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()

        tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)

        parameters = {
            "model": "BPE",
            "unk_token": unk_token,
            "suffix": suffix,
            "dropout": dropout,
            "lowercase": lowercase,
            "unicode_normalizer": unicode_normalizer,
            "bert_normalizer": bert_normalizer,
            "split_on_whitespace_only": split_on_whitespace_only,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return CharBPETokenizer(vocab, merges, **kwargs)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
        limit_alphabet: int = 1000,
        initial_alphabet: List[str] = [],
        suffix: Optional[str] = "</w>",
        show_progress: bool = True,
    ):
        """Train the model using the given files"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            end_of_word_suffix=suffix,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
        limit_alphabet: int = 1000,
        initial_alphabet: List[str] = [],
        suffix: Optional[str] = "</w>",
        show_progress: bool = True,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            end_of_word_suffix=suffix,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
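A minimal usage sketch for this class (not part of the repository file; "corpus.txt" is a placeholder path):

    from tokenizers import CharBPETokenizer

    tokenizer = CharBPETokenizer()
    tokenizer.train(["corpus.txt"], vocab_size=5000)  # hypothetical training file
    print(tokenizer.encode("Hello world").tokens)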
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py
from typing import Dict, Iterator, List, Optional, Union

from tokenizers import AddedToken, Tokenizer, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer


class BertWordPieceTokenizer(BaseTokenizer):
    """Bert WordPiece Tokenizer"""

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        unk_token: Union[str, AddedToken] = "[UNK]",
        sep_token: Union[str, AddedToken] = "[SEP]",
        cls_token: Union[str, AddedToken] = "[CLS]",
        pad_token: Union[str, AddedToken] = "[PAD]",
        mask_token: Union[str, AddedToken] = "[MASK]",
        clean_text: bool = True,
        handle_chinese_chars: bool = True,
        strip_accents: Optional[bool] = None,
        lowercase: bool = True,
        wordpieces_prefix: str = "##",
    ):
        if vocab is not None:
            tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
        else:
            tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))

        # Let the tokenizer know about special tokens if they are part of the vocab
        if tokenizer.token_to_id(str(unk_token)) is not None:
            tokenizer.add_special_tokens([str(unk_token)])
        if tokenizer.token_to_id(str(sep_token)) is not None:
            tokenizer.add_special_tokens([str(sep_token)])
        if tokenizer.token_to_id(str(cls_token)) is not None:
            tokenizer.add_special_tokens([str(cls_token)])
        if tokenizer.token_to_id(str(pad_token)) is not None:
            tokenizer.add_special_tokens([str(pad_token)])
        if tokenizer.token_to_id(str(mask_token)) is not None:
            tokenizer.add_special_tokens([str(mask_token)])

        tokenizer.normalizer = BertNormalizer(
            clean_text=clean_text,
            handle_chinese_chars=handle_chinese_chars,
            strip_accents=strip_accents,
            lowercase=lowercase,
        )
        tokenizer.pre_tokenizer = BertPreTokenizer()

        if vocab is not None:
            sep_token_id = tokenizer.token_to_id(str(sep_token))
            if sep_token_id is None:
                raise TypeError("sep_token not found in the vocabulary")
            cls_token_id = tokenizer.token_to_id(str(cls_token))
            if cls_token_id is None:
                raise TypeError("cls_token not found in the vocabulary")

            tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
        tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)

        parameters = {
            "model": "BertWordPiece",
            "unk_token": unk_token,
            "sep_token": sep_token,
            "cls_token": cls_token,
            "pad_token": pad_token,
            "mask_token": mask_token,
            "clean_text": clean_text,
            "handle_chinese_chars": handle_chinese_chars,
            "strip_accents": strip_accents,
            "lowercase": lowercase,
            "wordpieces_prefix": wordpieces_prefix,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab: str, **kwargs):
        vocab = WordPiece.read_file(vocab)
        return BertWordPieceTokenizer(vocab, **kwargs)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        limit_alphabet: int = 1000,
        initial_alphabet: List[str] = [],
        special_tokens: List[Union[str, AddedToken]] = [
            "[PAD]",
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[MASK]",
        ],
        show_progress: bool = True,
        wordpieces_prefix: str = "##",
    ):
        """Train the model using the given files"""

        trainer = trainers.WordPieceTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            special_tokens=special_tokens,
            show_progress=show_progress,
            continuing_subword_prefix=wordpieces_prefix,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        limit_alphabet: int = 1000,
        initial_alphabet: List[str] = [],
        special_tokens: List[Union[str, AddedToken]] = [
            "[PAD]",
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[MASK]",
        ],
        show_progress: bool = True,
        wordpieces_prefix: str = "##",
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator"""

        trainer = trainers.WordPieceTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            special_tokens=special_tokens,
            show_progress=show_progress,
            continuing_subword_prefix=wordpieces_prefix,
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
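A minimal usage sketch for this class (not part of the repository file; "vocab.txt" stands in for a real BERT vocabulary file, and the printed tokens are only indicative):

    from tokenizers import BertWordPieceTokenizer

    tokenizer = BertWordPieceTokenizer.from_file("vocab.txt")  # placeholder path
    output = tokenizer.encode("Hello, y'all!")
    print(output.tokens)  # e.g. ['[CLS]', 'hello', ',', 'y', "'", 'all', '!', '[SEP]']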
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union

from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer


class ByteLevelBPETokenizer(BaseTokenizer):
    """ByteLevelBPETokenizer

    Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
        add_prefix_space: bool = False,
        lowercase: bool = False,
        dropout: Optional[float] = None,
        unicode_normalizer: Optional[str] = None,
        continuing_subword_prefix: Optional[str] = None,
        end_of_word_suffix: Optional[str] = None,
        trim_offsets: bool = False,
    ):
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    continuing_subword_prefix=continuing_subword_prefix or "",
                    end_of_word_suffix=end_of_word_suffix or "",
                )
            )
        else:
            tokenizer = Tokenizer(BPE())

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.ByteLevel()
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)

        parameters = {
            "model": "ByteLevelBPE",
            "add_prefix_space": add_prefix_space,
            "lowercase": lowercase,
            "dropout": dropout,
            "unicode_normalizer": unicode_normalizer,
            "continuing_subword_prefix": continuing_subword_prefix,
            "end_of_word_suffix": end_of_word_suffix,
            "trim_offsets": trim_offsets,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return ByteLevelBPETokenizer(vocab, merges, **kwargs)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: List[Union[str, AddedToken]] = [],
    ):
        """Train the model using the given files"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: List[Union[str, AddedToken]] = [],
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
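A minimal usage sketch for this class (not part of the repository file; "corpus.txt" and "out_dir" are placeholders):

    import os
    from tokenizers import ByteLevelBPETokenizer

    tokenizer = ByteLevelBPETokenizer()
    tokenizer.train(["corpus.txt"], vocab_size=30000, special_tokens=["<s>", "</s>"])
    os.makedirs("out_dir", exist_ok=True)  # the target directory must exist
    tokenizer.save_model("out_dir")  # writes vocab.json and merges.txt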
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py
from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py
import json
import os
from typing import Iterator, List, Optional, Union, Tuple

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram

from .base_tokenizer import BaseTokenizer


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece Unigram Tokenizer

    Represents the Unigram algorithm, with the pretokenization used by SentencePiece
    """

    def __init__(
        self,
        vocab: Optional[List[Tuple[str, float]]] = None,
        replacement: str = "▁",
        add_prefix_space: bool = True,
    ):
        if vocab is not None:
            # Let Unigram(..) fail if the vocab is malformed
            tokenizer = Tokenizer(Unigram(vocab))
        else:
            tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        initial_alphabet: Optional[List[str]] = None,
        unk_token: Optional[str] = None,
    ):
        """
        Train the model using the given files

        Args:
            files (:obj:`List[str]`):
                A list of paths to the files that we should use for training
            vocab_size (:obj:`int`):
                The size of the final vocabulary, including all tokens and alphabet.
            show_progress (:obj:`bool`):
                Whether to show progress bars while training.
            special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
                A list of special tokens the model should know of.
            initial_alphabet (:obj:`List[str]`, `optional`):
                A list of characters to include in the initial alphabet, even
                if not seen in the training dataset.
                If the strings contain more than one character, only the first one
                is kept.
            unk_token (:obj:`str`, `optional`):
                The unknown token to be used by the model.
        """

        if special_tokens is None:
            special_tokens = []

        if initial_alphabet is None:
            initial_alphabet = []

        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens,
            show_progress=show_progress,
            initial_alphabet=initial_alphabet,
            unk_token=unk_token,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        initial_alphabet: Optional[List[str]] = None,
        unk_token: Optional[str] = None,
        length: Optional[int] = None,
    ):
        """
        Train the model using the given iterator

        Args:
            iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
                Any iterator over strings or list of strings
            vocab_size (:obj:`int`):
                The size of the final vocabulary, including all tokens and alphabet.
            show_progress (:obj:`bool`):
                Whether to show progress bars while training.
            special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
                A list of special tokens the model should know of.
            initial_alphabet (:obj:`List[str]`, `optional`):
                A list of characters to include in the initial alphabet, even
                if not seen in the training dataset.
                If the strings contain more than one character, only the first one
                is kept.
            unk_token (:obj:`str`, `optional`):
                The unknown token to be used by the model.
            length (:obj:`int`, `optional`):
                The total number of sequences in the iterator. This is used to
                provide meaningful progress tracking.
        """

        if special_tokens is None:
            special_tokens = []

        if initial_alphabet is None:
            initial_alphabet = []

        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens,
            show_progress=show_progress,
            initial_alphabet=initial_alphabet,
            unk_token=unk_token,
        )

        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )

    @staticmethod
    def from_spm(filename: str):
        try:
            import sys

            sys.path.append(".")

            import sentencepiece_model_pb2 as model
        except Exception:
            raise Exception(
                "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
            )

        m = model.ModelProto()
        with open(filename, "rb") as f:
            m.ParseFromString(f.read())

        precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
        vocab = [(piece.piece, piece.score) for piece in m.pieces]
        unk_id = m.trainer_spec.unk_id
        model_type = m.trainer_spec.model_type
        byte_fallback = m.trainer_spec.byte_fallback
        if model_type != 1:
            raise Exception(
                "You're trying to run a `Unigram` model but your file was trained with a different algorithm"
            )

        replacement = "▁"
        add_prefix_space = True

        tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))

        if precompiled_charsmap:
            tokenizer.normalizer = normalizers.Sequence(
                [
                    normalizers.Precompiled(precompiled_charsmap),
                    normalizers.Replace(Regex(" {2,}"), " "),
                ]
            )
        else:
            tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])

        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        parameters = {
            "model": "SentencePieceUnigram",
        }

        obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
        BaseTokenizer.__init__(obj, tokenizer, parameters)
        return obj
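
# Hedged usage sketch (added for illustration; not part of the original module).
# Trains a tiny Unigram model from an in-memory iterator; the sentences and the
# vocab size are made up.
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    sentences = ["Hello there my dear friend", "SentencePiece works on raw text"]
    tokenizer.train_from_iterator(
        sentences, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
    )
    print(tokenizer.encode("Hello friend").tokens)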
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from tokenizers.models import BPE from tokenizers.normalizers import NFKC from .base_tokenizer import BaseTokenizer class SentencePieceBPETokenizer(BaseTokenizer): """SentencePiece BPE Tokenizer Represents the BPE algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", replacement: str = "▁", add_prefix_space: bool = True, dropout: Optional[float] = None, fuse_unk: Optional[bool] = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) else: tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = NFKC() tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceBPE", "unk_token": unk_token, "replacement": replacement, "add_prefix_space": add_prefix_space, "dropout": dropout, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return SentencePieceBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
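
# Hedged usage sketch (added for illustration; not part of the original module);
# "corpus.txt" and the vocab size are placeholder choices.
if __name__ == "__main__":
    tokenizer = SentencePieceBPETokenizer()
    tokenizer.train(["corpus.txt"], vocab_size=5000, special_tokens=["<unk>"])
    print(tokenizer.encode("sentencepiece style BPE").tokens)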
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import Model
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
from tokenizers.processors import PostProcessor

Offsets = Tuple[int, int]


class BaseTokenizer:
    def __init__(self, tokenizer: Tokenizer, parameters=None):
        self._tokenizer = tokenizer
        self._parameters = parameters if parameters is not None else {}

    def __repr__(self):
        return "Tokenizer(vocabulary_size={}, {})".format(
            self._tokenizer.get_vocab_size(),
            ", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
        )

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.
        :param is_pair: Boolean indicating if the input would be a single sentence or a pair
        :return: The number of special tokens that would be added
        """
        return self._tokenizer.num_special_tokens_to_add(is_pair)

    def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
        """Returns the vocabulary

        Args:
            with_added_tokens: boolean:
                Whether to include the added tokens in the vocabulary

        Returns:
            The vocabulary
        """
        return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)

    def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
        """Returns the added reverse vocabulary

        Returns:
            The added vocabulary mapping ints to AddedTokens
        """
        return self._tokenizer.get_added_tokens_decoder()

    def get_vocab_size(self, with_added_tokens: bool = True) -> int:
        """Return the size of vocabulary, with or without added tokens.

        Args:
            with_added_tokens: (`optional`) bool:
                Whether to include the added special tokens in the count

        Returns:
            Size of vocabulary
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)

    def enable_padding(
        self,
        direction: Optional[str] = "right",
        pad_to_multiple_of: Optional[int] = None,
        pad_id: Optional[int] = 0,
        pad_type_id: Optional[int] = 0,
        pad_token: Optional[str] = "[PAD]",
        length: Optional[int] = None,
    ):
        """Change the padding strategy

        Args:
            direction: (`optional`) str:
                Can be one of: `right` or `left`

            pad_to_multiple_of: (`optional`) unsigned int:
                If specified, the padding length should always snap to the next multiple of
                the given value. For example if we were going to pad with a length of 250 but
                `pad_to_multiple_of=8` then we will pad to 256.

            pad_id: (`optional`) unsigned int:
                The index to be used when padding

            pad_type_id: (`optional`) unsigned int:
                The type index to be used when padding

            pad_token: (`optional`) str:
                The pad token to be used when padding

            length: (`optional`) unsigned int:
                If specified, the length at which to pad. If not specified
                we pad using the size of the longest sequence in a batch.

        """
        return self._tokenizer.enable_padding(
            direction=direction,
            pad_to_multiple_of=pad_to_multiple_of,
            pad_id=pad_id,
            pad_type_id=pad_type_id,
            pad_token=pad_token,
            length=length,
        )

    def no_padding(self):
        """Disable padding"""
        return self._tokenizer.no_padding()

    @property
    def padding(self) -> Optional[dict]:
        """Get the current padding parameters

        Returns:
            None if padding is disabled, a dict with the currently set parameters
            if the padding is enabled.
""" return self._tokenizer.padding def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"): """Change the truncation options Args: max_length: unsigned int: The maximum length at which to truncate stride: (`optional`) unsigned int: The length of the previous first sequence to be included in the overflowing sequence strategy: (`optional`) str: Can be one of `longest_first`, `only_first` or `only_second` """ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy) def no_truncation(self): """Disable truncation""" return self._tokenizer.no_truncation() @property def truncation(self) -> Optional[dict]: """Get the current truncation parameters Returns: None if truncation is disabled, a dict with the current truncation parameters if truncation is enabled """ return self._tokenizer.truncation def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int: """Add the given tokens to the vocabulary Args: tokens: List[Union[str, AddedToken]]: A list of tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_tokens(tokens) def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int: """Add the given special tokens to the vocabulary, and treat them as special tokens. The special tokens will never be processed by the model, and will be removed while decoding. Args: tokens: List[Union[str, AddedToken]]: A list of special tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_special_tokens(special_tokens) def normalize(self, sequence: str) -> str: """Normalize the given sequence Args: sequence: str: The sequence to normalize Returns: The normalized string """ return self._tokenizer.normalize(sequence) def encode( self, sequence: InputSequence, pair: Optional[InputSequence] = None, is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> Encoding: """Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Args: sequence: InputSequence: The sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: An Encoding """ if sequence is None: raise ValueError("encode: `sequence` can't be `None`") return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens) def encode_batch( self, inputs: List[EncodeInput], is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> List[Encoding]: """Encode the given inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Args: inputs: List[EncodeInput]: A list of single sequences or pair sequences to encode. 
                Each `EncodeInput` is expected to be of the following form:
                `Union[InputSequence, Tuple[InputSequence, InputSequence]]`

                Each `InputSequence` can either be raw text or pre-tokenized,
                according to the `is_pretokenized` argument:

                - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
                - If `is_pretokenized=True`: `InputSequence` is expected to be
                  `Union[List[str], Tuple[str]]`

            is_pretokenized: bool:
                Whether the input is already pre-tokenized.

            add_special_tokens: bool:
                Whether to add the special tokens while encoding.

        Returns:
            A list of Encoding
        """

        if inputs is None:
            raise ValueError("encode_batch: `inputs` can't be `None`")

        return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)

    def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
        """Decode the given list of ids to a string sequence

        Args:
            ids: List[unsigned int]:
                A list of ids to be decoded

            skip_special_tokens: (`optional`) boolean:
                Whether to remove all the special tokens from the output string

        Returns:
            The decoded string
        """
        if ids is None:
            raise ValueError("None input is not valid. Should be a list of integers.")

        return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

    def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
        """Decode the list of sequences to a list of string sequences

        Args:
            sequences: List[List[unsigned int]]:
                A list of sequence of ids to be decoded

            skip_special_tokens: (`optional`) boolean:
                Whether to remove all the special tokens from the output strings

        Returns:
            A list of decoded strings
        """
        if sequences is None:
            raise ValueError("None input is not valid. Should be list of list of integers.")

        return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)

    def token_to_id(self, token: str) -> Optional[int]:
        """Convert the given token to its corresponding id

        Args:
            token: str:
                The token to convert

        Returns:
            The corresponding id if it exists, None otherwise
        """
        return self._tokenizer.token_to_id(token)

    def id_to_token(self, id: int) -> Optional[str]:
        """Convert the given token id to its corresponding string

        Args:
            id: int:
                The token id to convert

        Returns:
            The corresponding string if it exists, None otherwise
        """
        return self._tokenizer.id_to_token(id)

    def save_model(self, directory: str, prefix: Optional[str] = None):
        """Save the current model to the given directory

        Args:
            directory: str:
                A path to the destination directory

            prefix: (Optional) str:
                An optional prefix, used to prefix each file name
        """
        return self._tokenizer.model.save(directory, prefix=prefix)

    def save(self, path: str, pretty: bool = True):
        """Save the current Tokenizer at the given path

        Args:
            path: str:
                A path to the destination Tokenizer file

            pretty: bool:
                Whether the saved JSON file should be prettified
        """
        return self._tokenizer.save(path, pretty)

    def to_str(self, pretty: bool = False):
        """Get a serialized JSON version of the Tokenizer as a str

        Args:
            pretty: bool:
                Whether the JSON string should be prettified

        Returns:
            str
        """
        return self._tokenizer.to_str(pretty)

    def post_process(
        self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
    ) -> Encoding:
        """Apply all the post-processing steps to the given encodings.

        The various steps are:
            1. Truncate according to global params (provided to `enable_truncation`)
            2. Apply the PostProcessor
            3. Pad according to global params.
(provided to `enable_padding`) Args: encoding: Encoding: The main Encoding to post process pair: Optional[Encoding]: An optional pair Encoding add_special_tokens: bool: Whether to add special tokens Returns: The resulting Encoding """ return self._tokenizer.post_process(encoding, pair, add_special_tokens) @property def model(self) -> Model: return self._tokenizer.model @model.setter def model(self, model: Model): self._tokenizer.model = model @property def normalizer(self) -> Normalizer: return self._tokenizer.normalizer @normalizer.setter def normalizer(self, normalizer: Normalizer): self._tokenizer.normalizer = normalizer @property def pre_tokenizer(self) -> PreTokenizer: return self._tokenizer.pre_tokenizer @pre_tokenizer.setter def pre_tokenizer(self, pre_tokenizer: PreTokenizer): self._tokenizer.pre_tokenizer = pre_tokenizer @property def post_processor(self) -> PostProcessor: return self._tokenizer.post_processor @post_processor.setter def post_processor(self, post_processor: PostProcessor): self._tokenizer.post_processor = post_processor @property def decoder(self) -> Decoder: return self._tokenizer.decoder @decoder.setter def decoder(self, decoder: Decoder): self._tokenizer.decoder = decoder
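
# Hedged usage sketch (added for illustration; not part of the original module).
# Wraps a bare Tokenizer to show the padding/truncation helpers above; the pad
# token and lengths are arbitrary placeholder choices.
if __name__ == "__main__":
    from tokenizers.models import BPE

    wrapped = BaseTokenizer(Tokenizer(BPE()))
    wrapped.enable_padding(pad_token="[PAD]", length=8)
    wrapped.enable_truncation(max_length=8)
    print(wrapped.padding)  # currently active padding parameters
    print(wrapped.truncation)  # currently active truncation parameters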
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/spm_parity_check.py
import tokenizers
from argparse import ArgumentParser
import sentencepiece as spm
from collections import Counter
import json
import os
import datetime

try:
    from termcolor import colored

    has_color = True
except Exception:
    has_color = False


def main():
    parser = ArgumentParser("SentencePiece parity checker")
    parser.add_argument(
        "--input-file",
        "-i",
        type=str,
        required=True,
        help="The text file to use for training/checking",
    )
    parser.add_argument(
        "--model-file",
        "-m",
        type=str,
        required=False,
        default=None,
        help="Use a pretrained token file",
    )
    parser.add_argument(
        "--model-prefix",
        type=str,
        default="spm_parity",
        help="Model prefix for spm_train",
    )
    parser.add_argument(
        "--vocab-size",
        "-v",
        type=int,
        default=8000,
        help="Vocab size for spm_train",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Verbosity",
    )
    parser.add_argument(
        "--train",
        action="store_true",
        help="Instead of checking the encoder part, we check the trainer part",
    )
    parser.add_argument(
        "--from-spm",
        action="store_true",
        help="Directly load the spm file with its own normalizer",
    )

    args = parser.parse_args()

    trained = False
    if args.model_file is None:
        spm.SentencePieceTrainer.Train(
            f"--input={args.input_file} --model_prefix={args.model_prefix}"
            f" --character_coverage=1.0"
            f" --max_sentence_length=40000"
            f" --num_threads=1"
            f" --vocab_size={args.vocab_size}"
        )
        trained = True
        args.model_file = f"{args.model_prefix}.model"

    try:
        if args.train:
            check_train(args)
        else:
            check_encode(args)
    finally:
        if trained:
            os.remove(f"{args.model_prefix}.model")
            os.remove(f"{args.model_prefix}.vocab")


def check_train(args):
    sp = spm.SentencePieceProcessor()
    sp.Load(args.model_file)

    tokenizer = tokenizers.SentencePieceUnigramTokenizer()
    tokenizer.train(args.input_file, show_progress=False)

    spm_tokens = 0
    tokenizer_tokens = 0

    with open(args.input_file, "r") as f:
        for i, line in enumerate(f):
            line = line.strip()
            ids = sp.EncodeAsIds(line)

            encoded = tokenizer.encode(line)

            spm_tokens += len(ids)
            tokenizer_tokens += len(encoded.ids)

    vocab = [0 for i in range(args.vocab_size)]
    spm_vocab = [0 for i in range(args.vocab_size)]

    for token, index in tokenizer.get_vocab().items():
        vocab[index] = token

    for i in range(args.vocab_size):
        spm_vocab[i] = sp.id_to_piece(i)

    # 0 is unk in tokenizers, 0, 1, 2 are unk, bos, eos in spm by default.
    for i, (token, spm_token) in enumerate(zip(vocab[1:], spm_vocab[3:])):
        if token != spm_token:
            print(f"First different token is token {i} ({token} != {spm_token})")
            break

    print(f"Tokenizer used {tokenizer_tokens}, whereas spm used {spm_tokens}")
    assert tokenizer_tokens < spm_tokens, "Our trainer should be more efficient than the SPM one"
    print("Ok, our trainer is more efficient than the SPM one")


def check_diff(spm_diff, tok_diff, sp, tok):
    if spm_diff == list(reversed(tok_diff)):
        # AAA -> AA+A vs A+AA case.
        return True
    elif len(spm_diff) == len(tok_diff) and tok.decode(spm_diff) == tok.decode(tok_diff):
        # Second order OK
        # Barrich -> Barr + ich vs Bar + rich
        return True
    spm_reencoded = sp.encode(sp.decode(spm_diff))
    tok_reencoded = tok.encode(tok.decode(spm_diff)).ids
    if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded:
        # Type 3 error.
        # Snehagatha ->
        #       Sne, h, aga, th, a
        #       Sne, ha, gat, ha
        # Re-encoding the wrong split with sp does not even recover what spm gave us
        # It fits the tokenizer, however...
return True return False def check_details(line, spm_ids, tok_ids, sp, tok): # Encoding can be the same with same result AAA -> A + AA vs AA + A # We can check that we use at least exactly the same number of tokens. for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)): if spm_id != tok_id: break first = i for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))): if spm_id != tok_id: break last = len(spm_ids) - i spm_diff = spm_ids[first:last] tok_diff = tok_ids[first:last] if check_diff(spm_diff, tok_diff, sp, tok): return True if last - first > 5: # We might have twice a single problem, attempt to subdivide the disjointed tokens into smaller problems spms = Counter(spm_ids[first:last]) toks = Counter(tok_ids[first:last]) removable_tokens = { spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si } min_width = 3 for i in range(last - first - min_width): if all( spm_ids[first + i + j] in removable_tokens for j in range(min_width) ): possible_matches = [ k for k in range(last - first - min_width) if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width] ] for j in possible_matches: if check_diff( spm_ids[first : first + i], tok_ids[first : first + j], sp, tok ) and check_details( line, spm_ids[first + i : last], tok_ids[first + j : last], sp, tok, ): return True print(f"Spm: {[tok.decode([spm_ids[i]]) for i in range(first, last)]}") try: print(f"Tok: {[tok.decode([tok_ids[i]]) for i in range(first, last)]}") except Exception: pass ok_start = tok.decode(spm_ids[:first]) ok_end = tok.decode(spm_ids[last:]) wrong = tok.decode(spm_ids[first:last]) print() if has_color: print( f"{colored(ok_start, 'grey')}{colored(wrong, 'red')}{colored(ok_end, 'grey')}" ) else: print(wrong) return False def check_encode(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) if args.from_spm: tok = tokenizers.SentencePieceUnigramTokenizer.from_spm(args.model_file) else: vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())] unk_id = sp.unk_id() tok = tokenizers.SentencePieceUnigramTokenizer(vocab, unk_id) perfect = 0 imperfect = 0 wrong = 0 now = datetime.datetime.now spm_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(args.input_file, "r", encoding="utf-8-sig") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = sp.EncodeAsIds(line) spm_time = now() encoded = tok.encode(line) tok_time = now() spm_total_time += spm_time - start tok_total_time += tok_time - spm_time if args.verbose: if i % 10000 == 0: print( f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})" ) print(f"SPM: {spm_total_time} - TOK: {tok_total_time}") if ids != encoded.ids: if check_details(line, ids, encoded.ids, sp, tok): imperfect += 1 continue else: wrong += 1 else: perfect += 1 assert ids == encoded.ids, f"line {i}: {line} : \n\n{ids}\n{encoded.ids}\n{list(zip(encoded.ids, encoded.tokens))}" print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") total = perfect + imperfect + wrong print( f"Accuracy {perfect * 100 / total:.2f} Slowdown : {tok_total_time/ spm_total_time:.2f}" ) if __name__ == "__main__": main()
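
# Hedged CLI sketch (added for illustration; not part of the original script).
# Typical invocations, with "corpus.txt" and "model.model" as placeholder paths:
#
#   python spm_parity_check.py --input-file corpus.txt --vocab-size 8000 --train
#   python spm_parity_check.py --input-file corpus.txt --model-file model.model --from-spm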
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/sentencepiece_extractor.py
from argparse import ArgumentParser
from json import dump
from logging import basicConfig, getLogger
from os import linesep, remove
from os.path import exists
from tempfile import NamedTemporaryFile
from typing import Dict, List, Tuple

from requests import get
from sentencepiece import SentencePieceProcessor
from tqdm import trange, tqdm

basicConfig()
logger = getLogger()


class SentencePieceExtractor:
    """
    Extractor implementation for SentencePiece trained models.
    https://github.com/google/sentencepiece
    """

    def __init__(self, model: str):
        # Get SentencePiece
        self.sp = SentencePieceProcessor()
        self.sp.Load(model)

    def extract(self) -> Tuple[Dict[str, int], List[Tuple]]:
        sp = self.sp
        vocab = {sp.id_to_piece(index): index for index in trange(sp.GetPieceSize())}

        # Merges
        merges = []
        for piece_l in tqdm(vocab.keys(), total=sp.GetPieceSize()):
            for piece_r in vocab.keys():
                merge = f"{piece_l}{piece_r}"
                piece_id = vocab.get(merge, None)
                if piece_id:
                    merges += [(piece_l, piece_r, piece_id)]
        merges = sorted(merges, key=lambda val: val[2])
        merges = [(val[0], val[1]) for val in merges]

        return vocab, merges


class YouTokenToMeExtractor:
    """
    Extractor implementation for the YouTokenToMe trained model format.
    The model file is laid out as follows:
        vocab_size nb_merges
        piece piece_id
        ...(repeated vocab_size)
        piece_id_left piece_id_right piece_id
        ...(repeated nb_merges)
    """

    def __init__(self, model: str):
        self._model = model

    def extract(self) -> Tuple[Dict[str, int], List[Tuple]]:
        with open(self._model, "r") as model_f:
            # Retrieve information
            nb_pieces, nb_merges = map(int, model_f.readline().split())
            vocab, merges = {}, []

            # Vocab
            for _ in trange(nb_pieces):
                piece, piece_id = map(int, model_f.readline().split())
                vocab[piece_id] = chr(piece)

            # Merges
            for _ in trange(nb_merges):
                piece_id_l, piece_id_r, piece = map(int, model_f.readline().split())
                piece_l, piece_r = vocab[piece_id_l], vocab[piece_id_r]
                vocab[piece] = f"{piece_l}{piece_r}"
                merges += [(piece_l, piece_r)]

            # Special tokens
            unk, pad, bos, eos = map(int, model_f.readline().split())
            vocab[unk] = "<unk>"
            vocab[pad] = "<pad>"
            vocab[bos] = "<bos>"
            vocab[eos] = "<eos>"

        # Invert key and value for vocab
        vocab = dict(zip(vocab.values(), vocab.keys()))
        return vocab, merges


if __name__ == "__main__":
    parser = ArgumentParser("SentencePiece vocab extractor")
    parser.add_argument(
        "--provider",
        type=str,
        required=True,
        choices=["sentencepiece", "youtokentome"],
        help="Indicate the format of the file.",
    )
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="SentencePiece model to extract vocab from."
) parser.add_argument( "--vocab-output-path", type=str, required=True, help="Path where the vocab.json file will be extracted", ) parser.add_argument( "--merges-output-path", type=str, required=True, help="Path where the merges file will be extracted", ) # Parse cli arguments args = parser.parse_args() try: if args.model.startswith("http"): # Saving model with NamedTemporaryFile("wb", delete=False) as f: logger.info("Writing content from {} to {}".format(args.model, f.name)) response = get(args.model, allow_redirects=True) f.write(response.content) args.remote_model = args.model args.model = f.name # Allocate extractor extractor = ( SentencePieceExtractor if args.provider == "sentencepiece" else YouTokenToMeExtractor ) extractor = extractor(args.model) logger.info(f"Using {type(extractor).__name__}") # Open output files and let's extract model information with open(args.vocab_output_path, "w") as vocab_f: with open(args.merges_output_path, "w") as merges_f: # Do the extraction vocab, merges = extractor.extract() # Save content dump(vocab, vocab_f) merges_f.writelines(map(lambda x: f"{x[0]} {x[1]}{linesep}", merges)) finally: # If model was downloaded from internet we need to cleanup the tmp folder. if hasattr(args, "remote_model") and exists(args.model): remove(args.model)
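
# Hedged CLI sketch (added for illustration; not part of the original script);
# all paths are placeholders:
#
#   python sentencepiece_extractor.py --provider sentencepiece --model spm.model \
#       --vocab-output-path vocab.json --merges-output-path merges.txt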
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/convert.py
import transformers

from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer
from tokenizers.processors import TemplateProcessing
from tokenizers.models import Unigram, BPE
from tokenizers import decoders
from tokenizers import Tokenizer, Regex
from tokenizers.normalizers import (
    StripAccents,
    NFKD,
    Lowercase,
    Sequence,
    BertNormalizer,
    Precompiled,
    Replace,
)
from tokenizers.pre_tokenizers import (
    Digits,
    WhitespaceSplit,
    Metaspace,
    Sequence as PSequence,
)
import json
import unicodedata
import sys
import os
import datetime
import argparse

sys.path.append(".")

from spm_parity_check import check_details
from sentencepiece_extractor import SentencePieceExtractor


def check_number_comma(piece: str) -> bool:
    return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit()


def get_proto(filename: str):
    try:
        import sys

        sys.path.append(".")

        import sentencepiece_model_pb2 as model
    except Exception:
        raise Exception(
            "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
        )

    m = model.ModelProto()
    m.ParseFromString(open(filename, "rb").read())
    return m


class Converter:
    def __init__(self, original_tokenizer):
        self.original_tokenizer = original_tokenizer

    def converted(self) -> Tokenizer:
        raise NotImplementedError()


class SpmConverter(Converter):
    def __init__(self, *args):
        super().__init__(*args)
        self.proto = get_proto(self.original_tokenizer.vocab_file)

    def vocab(self, proto):
        return [(piece.piece, piece.score) for piece in proto.pieces]

    def unk_id(self, proto):
        return proto.trainer_spec.unk_id

    def tokenizer(self, proto):
        model_type = proto.trainer_spec.model_type
        vocab = self.vocab(proto)
        unk_id = self.unk_id(proto)
        if model_type == 1:
            tokenizer = Tokenizer(Unigram(vocab, unk_id))
        elif model_type == 2:
            vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract()
            tokenizer = Tokenizer(
                BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True)
            )
        else:
            raise Exception(
                "The model you're trying to convert was trained with an algorithm that is not supported here (only Unigram and BPE are)"
            )
        return tokenizer

    def normalizer(self, proto):
        precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
        return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")])

    def post_processor(self, tokenizer):
        return None

    def converted(self):
        tokenizer = self.tokenizer(self.proto)

        # Assemble the tokenizer
        tokenizer.normalizer = self.normalizer(self.proto)

        replacement = "▁"
        add_prefix_space = True
        tokenizer.pre_tokenizer = Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        post_processor = self.post_processor(tokenizer)
        if post_processor:
            tokenizer.post_processor = post_processor

        # TODO: what parameters should we give?
parameters = {} return BaseTokenizer(tokenizer, parameters) class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["[CLS]", "$0", "[SEP]"], seq_b=["$1", "[SEP]"], special_tokens=[ ("[CLS]", tokenizer.get_vocab()["[CLS]"]), ("[SEP]", tokenizer.get_vocab()["[SEP]"]), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] return vocab def unk_id(self, proto): return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>", "en_XX"], seq_b=["$1", "</s>"], special_tokens=[ ("en_XX", tokenizer.get_vocab()["en_XX"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( 
seq_a=["$0", "<sep>", "<cls>"], seq_b=["$1", "<sep>"], special_tokens=[ ("<sep>", tokenizer.get_vocab()["<sep>"]), ("<cls>", tokenizer.get_vocab()["<cls>"]), ], ) class ReformerConverter(SpmConverter): pass class PegasusConverter(SpmConverter): offset = 103 def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0), (self.original_tokenizer.eos_token, 0), ] vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.offset def post_processor(self, tokenizer): eos = self.original_tokenizer.eos_token return TemplateProcessing( seq_a=["$0", eos], seq_b=["$1", eos], special_tokens=[(eos, tokenizer.get_vocab()[eos])], ) class T5Converter(SpmConverter): def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])], ) CONVERTERS = { "AlbertTokenizer": AlbertConverter, "CamembertTokenizer": CamembertConverter, "XLMRobertaTokenizer": XLMRobertaConverter, "MBartTokenizer": MBartConverter, "XLNetTokenizer": XLNetConverter, "ReformerTokenizer": ReformerConverter, "PegasusTokenizer": PegasusConverter, "T5Tokenizer": T5Converter, } def check(pretrained, filename): transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained) converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__] tokenizer = converter_class(transformer_tokenizer).converted() now = datetime.datetime.now trans_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(filename, "r") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = transformer_tokenizer.encode(line) trans = now() tok_ids = tokenizer.encode(line).ids tok = now() trans_total_time += trans - start tok_total_time += tok - trans if ids != tok_ids: if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer): continue assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}" tokenizer.save(f"{pretrained.replace('/', '-')}.json") return ("OK", trans_total_time / tok_total_time) def main(): pretraineds = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", "camembert-base", "xlm-roberta-base", "xlm-roberta-large", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", "facebook/mbart-large-en-ro", "facebook/mbart-large-cc25", "xlnet-base-cased", "xlnet-large-cased", "google/reformer-crime-and-punishment", "t5-small", "google/pegasus-large", ] parser = argparse.ArgumentParser() parser.add_argument( "--filename", required=True, type=str, help="The filename that we are going to encode in both versions to check that conversion worked", ) parser.add_argument( "--models", type=lambda s: s.split(","), default=pretraineds, help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})", ) args = parser.parse_args() print(args.filename) model_len = 50 status_len = 6 speedup_len = 8 print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|") print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|") for pretrained in args.models: status, speedup = check(pretrained, args.filename) print( 
f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|" ) if __name__ == "__main__": main()
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/using_the_visualizer.ipynb
from tokenizers import BertWordPieceTokenizer
from tokenizers.tools import EncodingVisualizer

EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa")

text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it."""

tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True)
visualizer = EncodingVisualizer(tokenizer=tokenizer)

visualizer(text)

from tokenizers.tools import Annotation

anno1 = Annotation(start=0, end=2, label="foo")
anno2 = Annotation(start=2, end=4, label="bar")
anno3 = Annotation(start=6, end=8, label="poo")
anno4 = Annotation(start=9, end=12, label="shoe")

annotations = [
    anno1,
    anno2,
    anno3,
    anno4,
    Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"),
    Annotation(start=63, end=70, label="foo"),
    Annotation(start=80, end=95, label="bar"),
    Annotation(start=120, end=128, label="bar"),
    Annotation(start=152, end=155, label="poo"),
]

visualizer(text, annotations=annotations)

funnyAnnotations = [dict(startPlace=i, endPlace=i + 3, theTag=str(i)) for i in range(0, 20, 4)]
funnyAnnotations

converter = lambda funny: Annotation(start=funny['startPlace'], end=funny['endPlace'], label=funny['theTag'])
visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter)

visualizer(text, annotations=funnyAnnotations)

from tokenizers import ByteLevelBPETokenizer

roberta_tokenizer = ByteLevelBPETokenizer.from_file('/tmp/roberta-base-vocab.json', '/tmp/roberta-base-merges.txt')
roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True)
roberta_visualizer(text, annotations=annotations)
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/train_bytelevel_bpe.py
import argparse import glob from os.path import join from tokenizers import ByteLevelBPETokenizer parser = argparse.ArgumentParser() parser.add_argument( "--files", default=None, metavar="path", type=str, required=True, help="The files to use as training; accept '**/*.txt' type of patterns \ if enclosed in quotes", ) parser.add_argument( "--out", default="./", type=str, help="Path to the output directory, where the files will be saved", ) parser.add_argument("--name", default="bpe-bytelevel", type=str, help="The name of the output vocab files") args = parser.parse_args() files = glob.glob(args.files) if not files: print(f"File does not exist: {args.files}") exit(1) # Initialize an empty tokenizer tokenizer = ByteLevelBPETokenizer(add_prefix_space=True) # And then train tokenizer.train( files, vocab_size=10000, min_frequency=2, show_progress=True, special_tokens=["<s>", "<pad>", "</s>"], ) # Save the files tokenizer.save_model(args.out, args.name) # Restoring model from learned vocab/merges tokenizer = ByteLevelBPETokenizer( join(args.out, "{}-vocab.json".format(args.name)), join(args.out, "{}-merges.txt".format(args.name)), add_prefix_space=True, ) # Test encoding print(tokenizer.encode("Training ByteLevel BPE is very easy").tokens)
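
# Hedged CLI sketch (added for illustration; not part of the original script);
# the glob pattern and output directory are placeholders:
#
#   python train_bytelevel_bpe.py --files "data/**/*.txt" --out ./out --name bpe-bytelevel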
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/custom_components.py
from typing import List

import jieba

from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import BPE
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer


class JiebaPreTokenizer:
    def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
        splits = []
        # we need to call `str(normalized_string)` because jieba expects a str,
        # not a NormalizedString
        for token, start, stop in jieba.tokenize(str(normalized_string)):
            splits.append(normalized_string[start:stop])

        return splits
        # We can also easily do it in one line:
        # return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))]

    def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
        # Just an odd example...
        splits = []
        last = 0
        for i, char in enumerate(str(normalized_string)):
            if char.isnumeric() and int(char) % 2 == 1:
                splits.append(normalized_string[last:i])
                last = i
        # Don't forget the last one
        splits.append(normalized_string[last:])
        return splits

    def pre_tokenize(self, pretok: PreTokenizedString):
        # Let's call split on the PreTokenizedString to split using `self.jieba_split`
        pretok.split(self.jieba_split)
        # Here we can call `pretok.split` multiple times if we want to apply
        # different algorithms, but we generally just need to call it once.
        pretok.split(self.odd_number_split)


class CustomDecoder:
    def decode(self, tokens: List[str]) -> str:
        return "".join(tokens)


class CustomNormalizer:
    def normalize(self, normalized: NormalizedString):
        # Most of these can be replaced by a `Sequence` combining some provided Normalizer,
        # (i.e. Sequence([NFKC(), Replace(Regex(r"\s+"), " "), Lowercase()]))
        # and it should be the preferred way. That being said, here is an example of the kind
        # of things that can be done here:
        normalized.nfkc()
        normalized.filter(lambda char: not char.isnumeric())
        normalized.replace(Regex(r"\s+"), " ")
        normalized.lowercase()


# This section shows how to attach these custom components to the Tokenizer
tok = Tokenizer(BPE())
tok.normalizer = Normalizer.custom(CustomNormalizer())
tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer())
tok.decoder = Decoder.custom(CustomDecoder())

input = "永和服装饰品有限公司"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('永和', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公司', (6, 10))]

input = "112233"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))]

input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!"
print("Normalize:", input)
print(tok.normalizer.normalize_str(input))
# " hello there my dear dear friend!"
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/train_with_datasets.py
import datasets from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers # Build a tokenizer bpe_tokenizer = Tokenizer(models.BPE()) bpe_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace() bpe_tokenizer.normalizer = normalizers.Lowercase() # Initialize a dataset dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train") # Build an iterator over this dataset def batch_iterator(): batch_size = 1000 for batch in dataset.iter(batch_size=batch_size): yield batch["text"] # And finally train bpe_tokenizer.train_from_iterator(batch_iterator(), length=len(dataset))
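
# Hedged follow-up sketch (added for illustration; not part of the original
# script): once training finishes, the tokenizer can be serialized and reloaded.
# The output path is an arbitrary placeholder.
bpe_tokenizer.save("bpe-wikitext.json")
reloaded = Tokenizer.from_file("bpe-wikitext.json")
print(reloaded.encode("hello world").tokens)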
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/example.py
import argparse import logging import time from tqdm import tqdm logging.getLogger("transformers").disabled = True logging.getLogger("transformers.tokenization_utils").disabled = True from tokenizers import Tokenizer, decoders, pre_tokenizers from tokenizers.models import BPE, WordPiece from tokenizers.normalizers import BertNormalizer from tokenizers.processors import BertProcessing from transformers import BertTokenizer, GPT2Tokenizer parser = argparse.ArgumentParser() parser.add_argument("--type", default="gpt2", type=str, help="The type of tokenizer (bert|gpt2)") parser.add_argument("--file", default=None, type=str, help="The file to encode") parser.add_argument("--vocab", default=None, type=str, required=True, help="The vocab file") parser.add_argument("--merges", default=None, type=str, help="The merges.txt file") parser.add_argument("--debug", action="store_true", help="Verbose output") args = parser.parse_args() if args.type == "gpt2" and args.merges is None: raise Exception("Expected merges.txt file") if args.file is not None: with open(args.file, "r") as fp: text = [line.strip() for line in fp] else: text = """ The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! 
""".split( "\n" ) if args.type == "gpt2": print("Running GPT-2 tokenizer") tok_p = GPT2Tokenizer.from_pretrained("gpt2") # Create a Tokenizer using BPE tok_r = Tokenizer(BPE(args.vocab, args.merges)) # Use ByteLevel PreTokenizer tok_r.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False) # Use ByteLevel Decoder tok_r.decoder = decoders.ByteLevel() elif args.type == "bert": print("Running Bert tokenizer") tok_p = BertTokenizer.from_pretrained(args.vocab) tok_r = Tokenizer(WordPiece(args.vocab, unk_token="[UNK]", max_input_chars_per_word=100)) tok_r.normalizer = BertNormalizer( clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True, ) # tok_r.pre_tokenizer = pre_tokenizers.Whitespace() tok_r.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tok_r.decoder = decoders.WordPiece() tok_r.post_processor = BertProcessing( ("[SEP]", tok_r.token_to_id("[SEP]")), ("[CLS]", tok_r.token_to_id("[CLS]")), ) else: raise Exception(f"Unknown type {args.type}") def tokenize_r(): return tok_r.encode_batch(text) def tokenize_p(): return [tok_p.encode(sentence, add_special_tokens=True) for sentence in tqdm(text)] print(f"Tokenizing {len(text)} lines") # Rust version start = time.time() encoded_r = tokenize_r() end = time.time() time_r = end - start print(f"Rust tokenizer took: {time_r} sec") # Python version start = time.time() encoded_p = tokenize_p() end = time.time() time_p = end - start print(f"Transformer tokenizer took: {time_p} sec") print(f"SpeedUp Ratio: {time_p / time_r}") ids_r = [sentence.ids for sentence in encoded_r] diff_ids = 0 for i in range(0, len(encoded_r)): if encoded_r[i].ids != encoded_p[i]: diff_ids += 1 if args.debug: print(encoded_r[i].ids) print(encoded_p[i]) print(encoded_r[i].tokens) print(tok_p.tokenize(text[i])) print(text[i]) print("") print(f"Ids differences: {diff_ids}") decoded_r = tok_r.decode_batch([sentence.ids for sentence in encoded_r], False) decoded_p = [tok_p.decode(en) for en in encoded_p] diff_decoded = 0 for i in range(0, len(text)): if decoded_r[i] != decoded_p[i]: diff_decoded += 1 if args.debug: print(f"Original: {text[i]}") print(f"Rust: {decoded_r[i]}") print(f"Python: {decoded_p[i]}") print("") print(f"Decoding differences: {diff_decoded}")
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/train_bert_wordpiece.py
import argparse import glob from tokenizers import BertWordPieceTokenizer parser = argparse.ArgumentParser() parser.add_argument( "--files", default=None, metavar="path", type=str, required=True, help="The files to use as training; accept '**/*.txt' type of patterns \ if enclosed in quotes", ) parser.add_argument( "--out", default="./", type=str, help="Path to the output directory, where the files will be saved", ) parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files") args = parser.parse_args() files = glob.glob(args.files) if not files: print(f"File does not exist: {args.files}") exit(1) # Initialize an empty tokenizer tokenizer = BertWordPieceTokenizer( clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True, ) # And then train tokenizer.train( files, vocab_size=10000, min_frequency=2, show_progress=True, special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], limit_alphabet=1000, wordpieces_prefix="##", ) # Save the files tokenizer.save_model(args.out, args.name)
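
# Hedged CLI sketch (added for illustration; not part of the original script);
# the glob pattern and output directory are placeholders:
#
#   python train_bert_wordpiece.py --files "data/**/*.txt" --out ./out --name bert-wordpiece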
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/tests/utils.py
import multiprocessing as mp import os import pytest import requests DATA_PATH = os.path.join("tests", "data") def download(url, with_filename=None): filename = with_filename if with_filename is not None else url.rsplit("/")[-1] filepath = os.path.join(DATA_PATH, filename) if not os.path.exists(filepath): with open(filepath, "wb") as f: response = requests.get(url, stream=True) response.raise_for_status() for chunk in response.iter_content(1024): f.write(chunk) return filepath @pytest.fixture(scope="session") def data_dir(): assert os.getcwd().endswith("python") exist = os.path.exists(DATA_PATH) and os.path.isdir(DATA_PATH) if not exist: os.mkdir(DATA_PATH) @pytest.fixture(scope="session") def roberta_files(data_dir): return { "vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json"), "merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt"), } @pytest.fixture(scope="session") def bert_files(data_dir): return { "vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"), } @pytest.fixture(scope="session") def openai_files(data_dir): return { "vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"), "merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"), } @pytest.fixture(scope="session") def train_files(data_dir): big = download("https://norvig.com/big.txt") small = os.path.join(DATA_PATH, "small.txt") with open(small, "w") as f: with open(big, "r") as g: for i, line in enumerate(g): f.write(line) if i > 100: break return { "small": small, "big": big, } @pytest.fixture(scope="session") def albert_base(data_dir): return download("https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json") @pytest.fixture(scope="session") def doc_wiki_tokenizer(data_dir): return download( "https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json", "tokenizer-wiki.json", ) @pytest.fixture(scope="session") def doc_pipeline_bert_tokenizer(data_dir): return download( "https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json", "bert-wiki.json", ) # On MacOS Python 3.8+ the default was modified to `spawn`, we need `fork` in tests. mp.set_start_method("fork") def multiprocessing_with_parallelism(tokenizer, enabled: bool): """ This helper can be used to test that disabling parallelism avoids dead locks when the same tokenizer is used after forking. """ # It's essential to this test that we call 'encode' or 'encode_batch' # before the fork. This causes the main process to "lock" some resources # provided by the Rust "rayon" crate that are needed for parallel processing. tokenizer.encode("Hi") tokenizer.encode_batch(["hi", "there"]) def encode(tokenizer): tokenizer.encode("Hi") tokenizer.encode_batch(["hi", "there"]) # Make sure this environment variable is set before the fork happens os.environ["TOKENIZERS_PARALLELISM"] = str(enabled) p = mp.Process(target=encode, args=(tokenizer,)) p.start() p.join(timeout=1) # At this point the process should have successfully exited, depending on whether parallelism # was activated or not. So we check the status and kill it if needed alive = p.is_alive() if alive: p.terminate() assert (alive and mp.get_start_method() == "fork") == enabled
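
# Hedged usage sketch (added for illustration; not part of the original module):
# how a test might combine the fixtures and helper above. The tokenizer built
# inside the test is hypothetical.
#
#   def test_parallelism_after_fork(train_files):
#       tokenizer = SomeTokenizer()  # any tokenizer implementation
#       multiprocessing_with_parallelism(tokenizer, False)  # must not dead-lock
#       multiprocessing_with_parallelism(tokenizer, True)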
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/tests/test_serialization.py
import json
import os
import unittest

import tqdm
from huggingface_hub import HfApi, cached_download, hf_hub_url

from tokenizers import Tokenizer

from .utils import albert_base, data_dir


class TestSerialization:
    def test_full_serialization_albert(self, albert_base):
        # Check we can read this file.
        # This used to fail because of BufReader that would fail because the
        # file exceeds the buffer capacity
        tokenizer = Tokenizer.from_file(albert_base)


def check(tokenizer_file) -> bool:
    with open(tokenizer_file, "r") as f:
        data = json.load(f)
    if "pre_tokenizer" not in data:
        return True
    if "type" not in data["pre_tokenizer"]:
        return False
    if data["pre_tokenizer"]["type"] == "Sequence":
        for pre_tok in data["pre_tokenizer"]["pretokenizers"]:
            if "type" not in pre_tok:
                return False
    return True


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable
    to a truthy value to run them.
    """
    if os.getenv("RUN_SLOW") != "1":
        return unittest.skip("use `RUN_SLOW=1` to run")(test_case)
    else:
        return test_case


@slow
class TestFullDeserialization(unittest.TestCase):
    def test_full_deserialization_hub(self):
        # Check we can read this file.
        # This used to fail because of BufReader that would fail because the
        # file exceeds the buffer capacity
        api = HfApi()

        not_loadable = []
        invalid_pre_tokenizer = []

        # models = api.list_models(filter="transformers")
        # for model in tqdm.tqdm(models):
        #     model_id = model.modelId
        #     for model_file in model.siblings:
        #         filename = model_file.rfilename
        #         if filename == "tokenizer.json":
        #             all_models.append((model_id, filename))

        all_models = [("HueyNemud/das22-10-camembert_pretrained", "tokenizer.json")]
        for model_id, filename in tqdm.tqdm(all_models):
            tokenizer_file = cached_download(hf_hub_url(model_id, filename=filename))

            is_ok = check(tokenizer_file)
            if not is_ok:
                print(f"{model_id} is affected by no type")
                invalid_pre_tokenizer.append(model_id)
            try:
                Tokenizer.from_file(tokenizer_file)
            except Exception as e:
                print(f"{model_id} is not loadable: {e}")
                not_loadable.append(model_id)
            except:  # noqa: E722 - catches BaseException, e.g. panics raised from the Rust side
                print(f"{model_id} is not loadable: Rust error")
                not_loadable.append(model_id)

        self.assertEqual(invalid_pre_tokenizer, [])
        self.assertEqual(not_loadable, [])
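A minimal sketch of using the module-level check helper outside the test, e.g. to vet a local file before handing it to the Rust deserializer; the path is a placeholder:

# Hypothetical standalone use of check() defined above.
path = "path/to/tokenizer.json"  # placeholder path
if check(path):
    tokenizer = Tokenizer.from_file(path)
    print(tokenizer.get_vocab_size())
else:
    print("a pre_tokenizer entry is missing its 'type' field")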
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_trainers.py
import copy
import os
import pickle

import pytest

from tokenizers import (
    AddedToken,
    SentencePieceUnigramTokenizer,
    Tokenizer,
    models,
    normalizers,
    pre_tokenizers,
    trainers,
)

from ..utils import data_dir, train_files


class TestBpeTrainer:
    def test_can_modify(self):
        trainer = trainers.BpeTrainer(
            vocab_size=12345,
            min_frequency=12,
            show_progress=False,
            special_tokens=["1", "2"],
            limit_alphabet=13,
            initial_alphabet=["a", "b", "c"],
            continuing_subword_prefix="pref",
            end_of_word_suffix="suf",
        )

        assert trainer.vocab_size == 12345
        assert trainer.min_frequency == 12
        assert trainer.show_progress == False
        assert trainer.special_tokens == [
            AddedToken("1", special=True),
            AddedToken("2", special=True),
        ]
        assert trainer.limit_alphabet == 13
        assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
        assert trainer.continuing_subword_prefix == "pref"
        assert trainer.end_of_word_suffix == "suf"

        # Modify these
        trainer.vocab_size = 20000
        assert trainer.vocab_size == 20000
        trainer.min_frequency = 1
        assert trainer.min_frequency == 1
        trainer.show_progress = True
        assert trainer.show_progress == True
        trainer.special_tokens = []
        assert trainer.special_tokens == []
        trainer.limit_alphabet = None
        assert trainer.limit_alphabet == None
        trainer.initial_alphabet = ["d", "z"]
        assert sorted(trainer.initial_alphabet) == ["d", "z"]
        trainer.continuing_subword_prefix = None
        assert trainer.continuing_subword_prefix == None
        trainer.end_of_word_suffix = None
        assert trainer.end_of_word_suffix == None

    def test_can_pickle(self):
        assert (
            trainers.BpeTrainer(min_frequency=12).__getstate__()
            == b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}"""
        )
        assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer)
        assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer)

        # Make sure everything is correct
        assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps(
            trainers.BpeTrainer(min_frequency=12)
        )


class TestWordPieceTrainer:
    def test_can_modify(self):
        trainer = trainers.WordPieceTrainer(
            vocab_size=12345,
            min_frequency=12,
            show_progress=False,
            special_tokens=["1", "2"],
            limit_alphabet=13,
            initial_alphabet=["a", "b", "c"],
            continuing_subword_prefix="pref",
            end_of_word_suffix="suf",
        )

        assert trainer.vocab_size == 12345
        assert trainer.min_frequency == 12
        assert trainer.show_progress == False
        assert trainer.special_tokens == [
            AddedToken("1", special=True),
            AddedToken("2", special=True),
        ]
        assert trainer.limit_alphabet == 13
        assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
        assert trainer.continuing_subword_prefix == "pref"
        assert trainer.end_of_word_suffix == "suf"

        # Modify these
        trainer.vocab_size = 20000
        assert trainer.vocab_size == 20000
        trainer.min_frequency = 1
        assert trainer.min_frequency == 1
        trainer.show_progress = True
        assert trainer.show_progress == True
        trainer.special_tokens = []
        assert trainer.special_tokens == []
        trainer.limit_alphabet = None
        assert trainer.limit_alphabet == None
        trainer.initial_alphabet = ["d", "z"]
        assert sorted(trainer.initial_alphabet) == ["d", "z"]
        trainer.continuing_subword_prefix = None
        assert trainer.continuing_subword_prefix == None
        trainer.end_of_word_suffix = None
        assert trainer.end_of_word_suffix == None

    def test_can_pickle(self):
        assert isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer)


class TestWordLevelTrainer:
    def test_can_modify(self):
        trainer = trainers.WordLevelTrainer(
            vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"]
        )

        assert trainer.vocab_size == 12345
        assert trainer.min_frequency == 12
        assert trainer.show_progress == False
        assert trainer.special_tokens == [
            AddedToken("1", special=True),
            AddedToken("2", special=True),
        ]

        # Modify these
        trainer.vocab_size = 20000
        assert trainer.vocab_size == 20000
        trainer.min_frequency = 1
        assert trainer.min_frequency == 1
        trainer.show_progress = True
        assert trainer.show_progress == True
        trainer.special_tokens = []
        assert trainer.special_tokens == []

    def test_can_pickle(self):
        assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer)


class TestUnigram:
    def test_train(self, train_files):
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(train_files["small"], show_progress=False)

        filename = "tests/data/unigram_trained.json"
        tokenizer.save(filename)
        os.remove(filename)

    def test_train_parallelism_with_custom_pretokenizer(self, train_files):
        class GoodCustomPretok:
            def split(self, n, normalized):
                # Here we just test that we can return a List[NormalizedString], it
                # does not really make sense to return twice the same otherwise
                return [normalized, normalized]

            def pre_tokenize(self, pretok):
                pretok.split(self.split)

        custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok())
        bpe_tokenizer = Tokenizer(models.BPE())
        bpe_tokenizer.normalizer = normalizers.Lowercase()
        bpe_tokenizer.pre_tokenizer = custom

        if "TOKENIZERS_PARALLELISM" in os.environ:
            del os.environ["TOKENIZERS_PARALLELISM"]

        trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False)
        bpe_tokenizer.train([train_files["small"]], trainer=trainer)

    def test_can_pickle(self):
        assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer)

    def test_train_with_special_tokens(self):
        filename = "tests/data/dummy-unigram-special_tokens-train.txt"
        with open(filename, "w") as f:
            f.write(
                """
[CLS] The Zen of Python, by Tim Peters [SEP]
[CLS] Beautiful is better than ugly. [SEP]
[CLS] Explicit is better than implicit. [SEP]
[CLS] Simple is better than complex. [SEP]
[CLS] Complex is better than complicated. [SEP]
[CLS] Flat is better than nested. [SEP]
[CLS] Sparse is better than dense. [SEP]
[CLS] Readability counts. [SEP]
[CLS] Special cases aren't special enough to break the rules. [SEP]
[CLS] Although practicality beats purity. [SEP]
[CLS] Errors should never pass silently. [SEP]
[CLS] Unless explicitly silenced. [SEP]
[CLS] In the face of ambiguity, refuse the temptation to guess. [SEP]
[CLS] There should be one-- and preferably only one --obvious way to do it. [SEP]
[CLS] Although that way may not be obvious at first unless you're Dutch. [SEP]
[CLS] Now is better than never. [SEP]
[CLS] Although never is often better than *right* now. [SEP]
[CLS] If the implementation is hard to explain, it's a bad idea. [SEP]
[CLS] If the implementation is easy to explain, it may be a good idea. [SEP]
[CLS] Namespaces are one honking great idea -- let's do more of those! [SEP]
"""
            )

        tokenizer = Tokenizer(models.Unigram())
        trainer = trainers.UnigramTrainer(
            show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]"
        )
        tokenizer.train([filename], trainer=trainer)

        assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [
            "[CLS]",
            " T",
            "h",
            "i",
            "s",
            " is ",
            "a",
            " ",
            "te",
            "s",
            "t ",
            "[SEP]",
        ]

        tokenizer = Tokenizer(models.Unigram())
        trainer = trainers.UnigramTrainer(
            show_progress=False,
            special_tokens=["[PAD]", "[SEP]", "[CLS]"],
            unk_token="[UNK]",
            vocab_size=100,
        )
        tokenizer.train([filename], trainer=trainer)

        assert tokenizer.get_vocab_size() == 100

        tokenizer = Tokenizer(models.Unigram())
        trainer = trainers.UnigramTrainer(
            show_progress=False,
            special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"],
            unk_token="[UNK]",
            vocab_size=100,
        )
        tokenizer.train([filename], trainer=trainer)

        assert tokenizer.get_vocab_size() == 100

    def test_cannot_train_different_model(self):
        tokenizer = Tokenizer(models.BPE())
        trainer = trainers.UnigramTrainer(show_progress=False)

        with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"):
            tokenizer.train([], trainer)

    def test_can_modify(self):
        trainer = trainers.UnigramTrainer(
            vocab_size=12345,
            show_progress=False,
            special_tokens=["1", AddedToken("2", lstrip=True)],
            initial_alphabet=["a", "b", "c"],
        )

        assert trainer.vocab_size == 12345
        assert trainer.show_progress == False
        assert trainer.special_tokens == [
            AddedToken("1", normalized=False, special=True),
            AddedToken("2", lstrip=True, normalized=False, special=True),
        ]
        assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]

        # Modify these
        trainer.vocab_size = 20000
        assert trainer.vocab_size == 20000
        trainer.show_progress = True
        assert trainer.show_progress == True
        trainer.special_tokens = []
        assert trainer.special_tokens == []
        trainer.initial_alphabet = ["d", "z"]
        assert sorted(trainer.initial_alphabet) == ["d", "z"]

    def test_continuing_prefix_trainer_mismatch(self):
        UNK = "[UNK]"
        special_tokens = [UNK]
        tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##"))
        trainer = trainers.BpeTrainer(special_tokens=special_tokens)
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)]
        )
        tokenizer.train(files=["data/big.txt"], trainer=trainer)

        tokenizer.save("data/tokenizer.json")

        tokenizer.from_file("data/tokenizer.json")
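Condensed from the Unigram tests above into a minimal, self-contained sketch; the corpus path is a placeholder:

from tokenizers import Tokenizer, models, trainers

tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100,
    show_progress=False,
    special_tokens=["[PAD]", "[SEP]", "[CLS]"],
    unk_token="[UNK]",
)
tokenizer.train(["path/to/corpus.txt"], trainer=trainer)  # placeholder corpus
# As the tests check, special tokens survive as single, unsplit pieces:
print(tokenizer.encode("[CLS] hello [SEP]").tokens)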
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_models.py
import pickle

import pytest

from tokenizers.models import BPE, Model, WordLevel, WordPiece

from ..utils import bert_files, data_dir, roberta_files


class TestBPE:
    def test_instantiate(self, roberta_files):
        assert isinstance(BPE(), Model)
        assert isinstance(BPE(), BPE)

        vocab = {"a": 0, "b": 1, "ab": 2}
        merges = [("a", "b")]
        assert isinstance(BPE(vocab, merges), Model)
        assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE)
        with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
            BPE(vocab=vocab)
        with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
            BPE(merges=merges)
        assert isinstance(
            pickle.loads(pickle.dumps(BPE(vocab, merges))),
            BPE,
        )

        # Deprecated calls in 0.9
        with pytest.deprecated_call():
            assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model)

        with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
            BPE(vocab=roberta_files["vocab"])
        with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
            BPE(merges=roberta_files["merges"])
        with pytest.deprecated_call():
            assert isinstance(
                pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))),
                BPE,
            )

    def test_can_modify(self):
        model = BPE(
            dropout=0.5,
            unk_token="[UNK]",
            continuing_subword_prefix="__prefix__",
            end_of_word_suffix="__suffix__",
            fuse_unk=False,
        )

        assert model.dropout == 0.5
        assert model.unk_token == "[UNK]"
        assert model.continuing_subword_prefix == "__prefix__"
        assert model.end_of_word_suffix == "__suffix__"
        assert model.fuse_unk == False
        assert model.byte_fallback == False

        # Modify these
        model.dropout = 0.1
        assert pytest.approx(model.dropout) == 0.1
        model.unk_token = "<unk>"
        assert model.unk_token == "<unk>"
        model.continuing_subword_prefix = None
        assert model.continuing_subword_prefix == None
        model.end_of_word_suffix = "suff"
        assert model.end_of_word_suffix == "suff"
        model.fuse_unk = True
        assert model.fuse_unk == True
        model.byte_fallback = True
        assert model.byte_fallback == True


class TestWordPiece:
    def test_instantiate(self, bert_files):
        assert isinstance(WordPiece(), Model)
        assert isinstance(WordPiece(), WordPiece)

        vocab = {"a": 0, "b": 1, "ab": 2}
        assert isinstance(WordPiece(vocab), Model)
        assert isinstance(WordPiece(vocab), WordPiece)
        assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece)
        assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece)

        # Deprecated calls in 0.9
        with pytest.deprecated_call():
            assert isinstance(WordPiece(bert_files["vocab"]), Model)
        with pytest.deprecated_call():
            assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece)

    def test_can_modify(self):
        model = WordPiece(
            unk_token="<oov>",
            continuing_subword_prefix="__prefix__",
            max_input_chars_per_word=200,
        )

        assert model.unk_token == "<oov>"
        assert model.continuing_subword_prefix == "__prefix__"
        assert model.max_input_chars_per_word == 200

        # Modify these
        model.unk_token = "<unk>"
        assert model.unk_token == "<unk>"
        model.continuing_subword_prefix = "$$$"
        assert model.continuing_subword_prefix == "$$$"
        model.max_input_chars_per_word = 10
        assert model.max_input_chars_per_word == 10


class TestWordLevel:
    def test_instantiate(self, roberta_files):
        assert isinstance(WordLevel(), Model)
        assert isinstance(WordLevel(), WordLevel)

        vocab = {"a": 0, "b": 1, "ab": 2}
        assert isinstance(WordLevel(vocab), Model)
        assert isinstance(WordLevel(vocab), WordLevel)
        assert isinstance(WordLevel.from_file(roberta_files["vocab"]), WordLevel)

        # The WordLevel model expects a vocab.json using the same format as roberta
        # so we can just try to load with this file
        with pytest.deprecated_call():
            assert isinstance(WordLevel(roberta_files["vocab"]), Model)
        with pytest.deprecated_call():
            assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel)

    def test_can_modify(self):
        model = WordLevel(unk_token="<oov>")
        assert model.unk_token == "<oov>"

        # Modify these
        model.unk_token = "<unk>"
        assert model.unk_token == "<unk>"
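To make the in-memory constructors above concrete, a minimal sketch wiring a BPE model into a Tokenizer:

from tokenizers import Tokenizer
from tokenizers.models import BPE

vocab = {"a": 0, "b": 1, "ab": 2}
merges = [("a", "b")]
tokenizer = Tokenizer(BPE(vocab, merges))
# The single merge (a, b) collapses the two characters into one token
print(tokenizer.encode("ab").tokens)  # ['ab']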
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_tokenizer.py
import pickle import numpy as np import pytest from tokenizers import AddedToken, Encoding, Tokenizer from tokenizers.implementations import BertWordPieceTokenizer from tokenizers.models import BPE, Model, WordPiece, Unigram from tokenizers.normalizers import Lowercase from tokenizers.pre_tokenizers import ByteLevel from tokenizers.processors import BertProcessing, RobertaProcessing from ..utils import bert_files, data_dir, multiprocessing_with_parallelism, roberta_files class TestAddedToken: def test_instantiate_with_content_only(self): added_token = AddedToken("<mask>") added_token.content = "<MASK>" assert added_token.content == "<MASK>" assert type(added_token) == AddedToken added_token.content = added_token.content.lower() assert added_token.special == False added_token.special = True assert added_token.special == True added_token.special = False assert str(added_token) == "<mask>" assert ( repr(added_token) == 'AddedToken("<mask>", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False)' ) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == True assert isinstance(pickle.loads(pickle.dumps(added_token)), AddedToken) def test_can_set_rstrip(self): added_token = AddedToken("<mask>", rstrip=True) assert added_token.rstrip == True assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == True def test_can_set_lstrip(self): added_token = AddedToken("<mask>", lstrip=True) assert added_token.rstrip == False assert added_token.lstrip == True assert added_token.single_word == False assert added_token.normalized == True def test_can_set_single_world(self): added_token = AddedToken("<mask>", single_word=True) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == True assert added_token.normalized == True def test_can_set_normalized(self): added_token = AddedToken("<mask>", normalized=False) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == False class TestTokenizer: def test_has_expected_type_and_methods(self): tokenizer = Tokenizer(BPE()) assert type(tokenizer) == Tokenizer assert callable(tokenizer.num_special_tokens_to_add) assert callable(tokenizer.get_vocab) assert callable(tokenizer.get_vocab_size) assert callable(tokenizer.enable_truncation) assert callable(tokenizer.no_truncation) assert callable(tokenizer.enable_padding) assert callable(tokenizer.no_padding) assert callable(tokenizer.encode) assert callable(tokenizer.encode_batch) assert callable(tokenizer.decode) assert callable(tokenizer.decode_batch) assert callable(tokenizer.token_to_id) assert callable(tokenizer.id_to_token) assert callable(tokenizer.add_tokens) assert callable(tokenizer.add_special_tokens) assert callable(tokenizer.train) assert callable(tokenizer.post_process) assert isinstance(tokenizer.model, Model) assert tokenizer.normalizer is None assert tokenizer.pre_tokenizer is None assert tokenizer.post_processor is None assert tokenizer.decoder is None assert isinstance(pickle.loads(pickle.dumps(Tokenizer(BPE()))), Tokenizer) def test_add_tokens(self): tokenizer = Tokenizer(BPE()) added = tokenizer.add_tokens(["my", "name", "is", "john"]) assert added == 4 tokens = [AddedToken("the"), AddedToken("quick", normalized=False), AddedToken()] assert tokens[0].normalized == True added = tokenizer.add_tokens(tokens) assert 
added == 2 assert tokens[0].normalized == True assert tokens[1].normalized == False def test_add_special_tokens(self): tokenizer = Tokenizer(BPE()) # Can add special tokens as `str` added = tokenizer.add_special_tokens(["my", "name", "is", "john"]) assert added == 4 # Can add special tokens as `AddedToken` tokens = [AddedToken("the"), AddedToken("quick", normalized=True), AddedToken()] assert tokens[0].normalized == True added = tokenizer.add_special_tokens(tokens) assert added == 2 assert tokens[0].normalized == False assert tokens[1].normalized == True def test_encode(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can encode single sequence output = tokenizer.encode("my name is john") assert output.tokens == ["my", "name", "is", "john"] assert type(output.ids) == list assert type(output.type_ids) == list assert type(output.offsets) == list with pytest.warns(DeprecationWarning): assert type(output.words) == list assert type(output.word_ids) == list assert type(output.special_tokens_mask) == list assert type(output.attention_mask) == list assert type(output.overflowing) == list # Can encode a pair of sequences output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["my", "name", "is", "john", "pair"] assert isinstance(pickle.loads(pickle.dumps(output)), Encoding) # Can encode a single pre-tokenized sequence output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True) assert output.tokens == ["my", "name", "is", "john"] # Can encode a batch with both a single sequence and a pair of sequences output = tokenizer.encode_batch(["my name is john", ("my name is john", "pair")]) assert len(output) == 2 def test_encode_formats(self, bert_files): with pytest.deprecated_call(): tokenizer = BertWordPieceTokenizer(bert_files["vocab"]) # Encode output = tokenizer.encode("my name is john") assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"] output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"] output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True) assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"] output = tokenizer.encode(["my", "name", "is", "john"], ["pair"], is_pretokenized=True) assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"] # Encode batch result_single = [ ["[CLS]", "my", "name", "is", "john", "[SEP]"], ["[CLS]", "my", "name", "is", "georges", "[SEP]"], ] result_pair = [ ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"], ["[CLS]", "my", "name", "is", "georges", "[SEP]", "pair", "[SEP]"], ] def format(encodings): return [e.tokens for e in encodings] def test_single(input, is_pretokenized=False): output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized) assert format(output) == result_single def test_pair(input, is_pretokenized=False): output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized) assert format(output) == result_pair # Classic inputs # Lists test_single(["My name is John", "My name is Georges"]) test_pair([("my name is john", "pair"), ("my name is georges", "pair")]) test_pair([["my name is john", "pair"], ["my name is georges", "pair"]]) # Tuples test_single(("My name is John", "My name is Georges")) test_pair((("My name is John", "pair"), ("My name is Georges", "pair"))) # Numpy test_single(np.array(["My name is John", "My name is Georges"])) test_pair(np.array([("My name 
is John", "pair"), ("My name is Georges", "pair")])) test_pair(np.array([["My name is John", "pair"], ["My name is Georges", "pair"]])) # PreTokenized inputs # Lists test_single([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]], True) test_pair( [ (["My", "name", "is", "John"], ["pair"]), (["My", "name", "is", "Georges"], ["pair"]), ], True, ) test_pair( [ [["My", "name", "is", "John"], ["pair"]], [["My", "name", "is", "Georges"], ["pair"]], ], True, ) # Tuples test_single((("My", "name", "is", "John"), ("My", "name", "is", "Georges")), True) test_pair( ( (("My", "name", "is", "John"), ("pair",)), (("My", "name", "is", "Georges"), ("pair",)), ), True, ) test_pair( ( (["My", "name", "is", "John"], ["pair"]), (["My", "name", "is", "Georges"], ["pair"]), ), True, ) # Numpy test_single( np.array([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]]), True, ) test_single( np.array((("My", "name", "is", "John"), ("My", "name", "is", "Georges"))), True, ) test_pair( np.array( [ [["My", "name", "is", "John"], ["pair"]], [["My", "name", "is", "Georges"], ["pair"]], ], dtype=object, ), True, ) test_pair( np.array( ( (("My", "name", "is", "John"), ("pair",)), (("My", "name", "is", "Georges"), ("pair",)), ), dtype=object, ), True, ) # Mal formed with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode([["my", "name"]]) with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode("My name is john", [["pair"]]) with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode("my name is john", ["pair"]) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode("My name is john", is_pretokenized=True) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode("My name is john", ["pair"], is_pretokenized=True) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode(["My", "name", "is", "John"], "pair", is_pretokenized=True) def test_encode_add_special_tokens(self, roberta_files): with pytest.deprecated_call(): tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"])) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True) tokenizer.post_processor = RobertaProcessing( ("</s>", tokenizer.token_to_id("</s>")), ("<s>", tokenizer.token_to_id("<s>")), ) # Can encode with special tokens output_with_specials = tokenizer.encode("My name is John", add_special_tokens=True) assert output_with_specials.tokens == ["<s>", "ĠMy", "Ġname", "Ġis", "ĠJohn", "</s>"] # Can encode without special tokens output_without_specials = tokenizer.encode("My name is John", add_special_tokens=False) assert output_without_specials.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] def test_truncation(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.enable_truncation(2) # Can truncate single sequences output = tokenizer.encode("my name is john") assert output.tokens == ["my", "name"] # Can truncate pair sequences as well output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["my", "pair"] # Can get the params and give them to enable_truncation trunc = tokenizer.truncation tokenizer.enable_truncation(**trunc) # Left truncation direction tokenizer.enable_truncation(2, direction="left") output = tokenizer.encode("my name is john") assert output.tokens == ["is", "john"] output = 
tokenizer.encode("my name is john", "pair") assert output.tokens == ["john", "pair"] def test_padding(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # By default it does nothing when encoding single sequence tokenizer.enable_padding() output = tokenizer.encode("my name") assert output.tokens == ["my", "name"] # Can pad to the longest in a batch output = tokenizer.encode_batch(["my name", "my name is john"]) assert all([len(encoding) == 4 for encoding in output]) # Can pad to the specified length otherwise tokenizer.enable_padding(length=4) output = tokenizer.encode("my name") assert output.tokens == ["my", "name", "[PAD]", "[PAD]"] output = tokenizer.encode("my name", "pair") assert output.tokens == ["my", "name", "pair", "[PAD]"] # Can get the params and give them to enable_padding padding = tokenizer.padding tokenizer.enable_padding(**padding) def test_decode(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can decode single sequences output = tokenizer.decode([0, 1, 2, 3]) assert output == "my name is john" # Can decode batch output = tokenizer.decode_batch([[0, 1, 2, 3], [4]]) assert output == ["my name is john", "pair"] def test_get_vocab(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can retrieve vocab with added tokens vocab = tokenizer.get_vocab(with_added_tokens=True) assert vocab == {"is": 2, "john": 3, "my": 0, "name": 1, "pair": 4} # Can retrieve vocab without added tokens vocab = tokenizer.get_vocab(with_added_tokens=False) assert vocab == {} # Can retrieve added token decoder vocab = tokenizer.get_added_tokens_decoder() assert vocab == { 0: AddedToken("my", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 1: AddedToken("name", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 2: AddedToken("is", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 3: AddedToken("john", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 4: AddedToken("pair", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), } def test_get_vocab_size(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can retrieve vocab's size with added tokens size = tokenizer.get_vocab_size(with_added_tokens=True) assert size == 5 # Can retrieve vocab's size without added tokens size = tokenizer.get_vocab_size(with_added_tokens=False) assert size == 0 def test_post_process(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.enable_truncation(2) tokenizer.enable_padding(length=4) encoding = tokenizer.encode("my name is john") pair_encoding = tokenizer.encode("pair") # Can post process a single encoding output = tokenizer.post_process(encoding) assert output.tokens == ["my", "name", "[PAD]", "[PAD]"] # Can post process a pair of encodings output = tokenizer.post_process(encoding, pair_encoding) assert output.tokens == ["my", "pair", "[PAD]", "[PAD]"] def test_multiprocessing_with_parallelism(self): tokenizer = Tokenizer(BPE()) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_from_pretrained(self): tokenizer = Tokenizer.from_pretrained("bert-base-cased") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["Hey", "there", "dear", 
"friend", "!"] def test_from_pretrained_revision(self): tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["hey", "there", "dear", "friend", "!"] tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test", revision="gpt-2") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"] def test_unigram_byte_fallback(self): vocab = [ ("<unk>", 0.0), ("A", -0.01), ("sen", -0.02), ("te", -0.03), ("n", -0.04), ("ce", -0.05), ("<0xF0>", -0.06), ("<0x9F>", -0.06), ("<0xA4>", -0.06), ("<0x97>", -0.06), (" ", -0.4), ] tokenizer = tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False)) output = tokenizer.encode("A sentence 🤗") assert output.ids == [1, 10, 2, 3, 4, 5, 10, 0] assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "🤗"] tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True)) output = tokenizer.encode("A sentence 🤗") assert output.ids == [1, 10, 2, 3, 4, 5, 10, 6, 7, 8, 9] assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "<0xF0>", "<0x9F>", "<0xA4>", "<0x97>"]
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_decoders.py
import json import pickle import pytest from tokenizers.decoders import ( CTC, BPEDecoder, ByteLevel, Decoder, Metaspace, Sequence, WordPiece, ByteFallback, Replace, Strip, Fuse, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert isinstance(ByteLevel(), Decoder) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_decoding(self): decoder = ByteLevel() assert decoder.decode(["My", "Ġname", "Ġis", "ĠJohn"]) == "My name is John" def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestReplace: def test_instantiate(self): assert Replace("_", " ") is not None assert isinstance(Replace("_", " "), Decoder) assert isinstance(Replace("_", " "), Replace) # assert isinstance(pickle.loads(pickle.dumps(Replace("_", " "))), Replace) def test_decoding(self): decoder = Replace("_", " ") assert decoder.decode(["My", "_name", "_is", "_John"]) == "My name is John" class TestWordPiece: def test_instantiate(self): assert WordPiece() is not None assert WordPiece(prefix="__") is not None assert WordPiece(cleanup=True) is not None assert isinstance(WordPiece(), Decoder) assert isinstance(WordPiece(), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece) def test_decoding(self): decoder = WordPiece() assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John" decoder = WordPiece(prefix="__", cleanup=False) assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John" def test_can_modify(self): decoder = WordPiece(prefix="$$", cleanup=False) assert decoder.prefix == "$$" assert decoder.cleanup == False # Modify these decoder.prefix = "__" assert decoder.prefix == "__" decoder.cleanup = True assert decoder.cleanup == True class TestByteFallback: def test_instantiate(self): assert ByteFallback() is not None assert isinstance(ByteFallback(), Decoder) assert isinstance(ByteFallback(), ByteFallback) assert isinstance(pickle.loads(pickle.dumps(ByteFallback())), ByteFallback) def test_decoding(self): decoder = ByteFallback() assert decoder.decode(["My", " na", "me"]) == "My name" assert decoder.decode(["<0x61>"]) == "a" assert decoder.decode(["<0xE5>"]) == "�" assert decoder.decode(["<0xE5>", "<0x8f>"]) == "��" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>"]) == "叫" assert decoder.decode(["<0xE5>", "<0x8f>", "a"]) == "��a" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>", "a"]) == "叫a" class TestFuse: def test_instantiate(self): assert Fuse() is not None assert isinstance(Fuse(), Decoder) assert isinstance(Fuse(), Fuse) assert isinstance(pickle.loads(pickle.dumps(Fuse())), Fuse) def test_decoding(self): decoder = Fuse() assert decoder.decode(["My", " na", "me"]) == "My name" class TestStrip: def test_instantiate(self): assert Strip(left=0, right=0) is not None assert isinstance(Strip(content="_", left=0, right=0), Decoder) assert isinstance(Strip(content="_", left=0, right=0), Strip) assert isinstance(pickle.loads(pickle.dumps(Strip(content="_", left=0, right=0))), Strip) def test_decoding(self): decoder = Strip(content="_", left=1, right=0) assert decoder.decode(["_My", " na", "me", " _-", "__-"]) == "My name _-_-" class TestMetaspace: def test_instantiate(self): assert Metaspace() is not 
None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(add_prefix_space=True) is not None assert isinstance(Metaspace(), Decoder) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_decoding(self): decoder = Metaspace() assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John" decoder = Metaspace(replacement="-", add_prefix_space=False) assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John" def test_can_modify(self): decoder = Metaspace(replacement="*", add_prefix_space=False) assert decoder.replacement == "*" assert decoder.add_prefix_space == False # Modify these decoder.replacement = "&" assert decoder.replacement == "&" decoder.add_prefix_space = True assert decoder.add_prefix_space == True class TestBPEDecoder: def test_instantiate(self): assert BPEDecoder() is not None assert BPEDecoder(suffix="_") is not None assert isinstance(BPEDecoder(), Decoder) assert isinstance(BPEDecoder(), BPEDecoder) assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder) def test_decoding(self): decoder = BPEDecoder() assert decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"]) == "My name is John" decoder = BPEDecoder(suffix="_") assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John" def test_can_modify(self): decoder = BPEDecoder(suffix="123") assert decoder.suffix == "123" # Modify these decoder.suffix = "</w>" assert decoder.suffix == "</w>" class TestCTCDecoder: def test_instantiate(self): assert CTC() is not None assert CTC(pad_token="[PAD]") is not None assert isinstance(CTC(), Decoder) assert isinstance(CTC(), CTC) assert isinstance(pickle.loads(pickle.dumps(CTC())), CTC) def test_decoding(self): decoder = CTC() assert ( decoder.decode(["<pad>", "<pad>", "h", "e", "e", "l", "l", "<pad>", "l", "o", "o", "o", "<pad>"]) == "hello" ) decoder = CTC(pad_token="[PAD]") assert ( decoder.decode(["[PAD]", "[PAD]", "h", "e", "e", "l", "l", "[PAD]", "l", "o", "o", "o", "[PAD]"]) == "hello" ) def test_can_modify(self): decoder = CTC(pad_token="[PAD]") assert decoder.pad_token == "[PAD]" assert decoder.word_delimiter_token == "|" assert decoder.cleanup == True # Modify these decoder.pad_token = "{pad}" assert decoder.pad_token == "{pad}" decoder.word_delimiter_token = "_" assert decoder.word_delimiter_token == "_" decoder.cleanup = False assert decoder.cleanup == False class TestSequenceDecoder: def test_instantiate(self): assert Sequence([]) is not None assert Sequence([CTC()]) is not None assert isinstance(Sequence([]), Decoder) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_decoding(self): decoder = Sequence([CTC(), Metaspace()]) initial = ["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"] expected = "Hi you" assert decoder.decode(initial) == expected
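A sketch of attaching the same kind of decoder chain to a full Tokenizer, so that Tokenizer.decode maps ids to tokens and then runs the chain; the vocab and the expected output are illustrative:

from tokenizers import Tokenizer
from tokenizers.decoders import CTC, Metaspace, Sequence
from tokenizers.models import WordLevel

vocab = {"<pad>": 0, "▁Hi": 1, "▁you": 2}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="<pad>"))
tokenizer.decoder = Sequence([CTC(), Metaspace()])
# CTC dedupes consecutive repeats and drops "<pad>"; Metaspace turns "▁" back into spaces
print(tokenizer.decode([0, 1, 1, 0, 2]))  # expected: "Hi you"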
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_processors.py
import json import pickle import pytest from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer from tokenizers.processors import ( BertProcessing, ByteLevel, PostProcessor, RobertaProcessing, Sequence, TemplateProcessing, ) from ..utils import data_dir, roberta_files class TestBertProcessing: def test_instantiate(self): processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) assert processor is not None assert isinstance(processor, PostProcessor) assert isinstance(processor, BertProcessing) assert isinstance( pickle.loads(pickle.dumps(BertProcessing(("[SEP]", 0), ("[CLS]", 1)))), BertProcessing, ) def test_processing(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) output = tokenizer.encode("my name", "pair") assert output.tokens == ["[CLS]", "my", "name", "[SEP]", "pair", "[SEP]"] assert output.ids == [1, 2, 3, 0, 6, 0] class TestRobertaProcessing: def test_instantiate(self): processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) assert processor is not None assert isinstance(processor, PostProcessor) assert isinstance(processor, RobertaProcessing) assert isinstance( pickle.loads(pickle.dumps(RobertaProcessing(("</s>", 1), ("<s>", 0)))), RobertaProcessing, ) def test_processing(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) output = tokenizer.encode("my name", "pair") assert output.tokens == ["<s>", "my", "name", "</s>", "</s>", "pair", "</s>"] assert output.ids == [0, 2, 3, 1, 1, 6, 1] class TestByteLevelProcessing: def test_instantiate(self): assert ByteLevel() is not None assert ByteLevel(trim_offsets=True) is not None assert isinstance(ByteLevel(), PostProcessor) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_processing(self, roberta_files): # Deprecated in 0.9 with pytest.deprecated_call(): tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"])) tokenizer.pre_tokenizer = ByteLevelPreTokenizer(add_prefix_space=True) # Keeps original offsets output = tokenizer.encode("My name is John") assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] assert output.offsets == [(0, 2), (2, 7), (7, 10), (10, 15)] # Trims offsets when activated tokenizer.post_processor = ByteLevel(trim_offsets=True) output = tokenizer.encode("My name is John") assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15)] def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestTemplateProcessing: def get_bert(self): return TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]", "$A", "[SEP]", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) def get_roberta(self): return TemplateProcessing( single="<s> $0 </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[("<s>", 0), ("</s>", 1)], ) def get_t5_squad(self): # >>> from transformers import AutoTokenizer # >>> tok = AutoTokenizer.from_pretrained("t5-small") # >>> tok.tokenize("question: ") # ['▁question', ':'] # >>> tok.tokenize("context: ") # ['▁context', ':'] # 
>>> tok.encode("context: ") # [2625, 10] # >>> tok.encode("question: ") # [822, 10] return TemplateProcessing( single=["$0"], pair=["Q", "$A", "C", "$B"], special_tokens=[ { "id": "Q", "ids": [2625, 10], "tokens": ["_question", ":"], }, { "id": "C", "ids": [822, 10], "tokens": ["_context", ":"], }, ], ) def test_instantiate(self): bert = self.get_bert() assert bert is not None assert isinstance(bert, PostProcessor) assert isinstance(bert, TemplateProcessing) assert isinstance(pickle.loads(pickle.dumps(bert)), TemplateProcessing) # It is absolutely legal to have tokens with spaces in the name: processor = TemplateProcessing( single=["[ C L S ]", "Token with space"], special_tokens=[("[ C L S ]", 0), ("Token with space", 1)], ) # Sequence identifiers must be well formed: with pytest.raises(Exception, match="Cannot build Piece"): processor = TemplateProcessing(single="[CLS] $$ [SEP]") with pytest.raises(Exception, match="Cannot build Piece"): processor = TemplateProcessing(single="[CLS] $A: [SEP]") # Special tokens must be provided when used in template: with pytest.raises(Exception, match="Missing SpecialToken\\(s\\) with id\\(s\\)"): processor = TemplateProcessing(single=["[CLS]"]) def test_bert_parity(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) original = tokenizer.encode("my name", "pair") tokenizer.post_processor = self.get_bert() template = tokenizer.encode("my name", "pair") assert original.ids == template.ids def test_roberta_parity(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) original = tokenizer.encode("my name is john", "pair") tokenizer.post_processor = self.get_roberta() template = tokenizer.encode("my name is john", "pair") assert original.ids == template.ids class TestSequenceProcessing: def test_sequence_processing(self): assert Sequence([]) is not None assert Sequence([ByteLevel()]) is not None assert isinstance(Sequence([]), PostProcessor) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_post_process(self): byte_level = ByteLevel(trim_offsets=True) template = TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "Ġjohn", "pair"]) tokenizer.post_processor = template # Before the sequence original = tokenizer.encode("my name is Ġjohn") assert original.ids == [1, 2, 3, 4, 5, 0] assert original.type_ids == [0, 0, 0, 0, 0, 0] assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0)] pair = tokenizer.encode("my name is Ġjohn", "pair") # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0] assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1] assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0), (0, 4), (0, 0)] processor = Sequence([byte_level, template]) tokenizer.post_processor = processor original = tokenizer.encode("my name is Ġjohn") assert original.ids == [1, 2, 3, 4, 5, 0] assert original.type_ids == [0, 0, 0, 0, 0, 0] # Offsets ARE trimmed assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 
16), (0, 0)] pair = tokenizer.encode("my name is Ġjohn", "pair") # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0] assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1] assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0), (0, 4), (0, 0)]
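Distilled from the template tests above, a minimal sketch of the template syntax: $A/$B select the sequence, the ":1" suffix pins the type id, and special tokens are resolved through the provided ids:

from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.processors import TemplateProcessing

vocab = {"[CLS]": 0, "[SEP]": 1, "hello": 2, "world": 3, "[UNK]": 4}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 0), ("[SEP]", 1)],
)
# Without a pre-tokenizer each input is looked up as a single word
print(tokenizer.encode("hello", "world").tokens)
# ['[CLS]', 'hello', '[SEP]', 'world', '[SEP]']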
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py
import json import pickle import pytest from tokenizers.pre_tokenizers import ( BertPreTokenizer, ByteLevel, CharDelimiterSplit, Digits, Metaspace, PreTokenizer, Punctuation, Sequence, Split, UnicodeScripts, Whitespace, WhitespaceSplit, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert ByteLevel(add_prefix_space=True) is not None assert ByteLevel(add_prefix_space=False) is not None assert isinstance(ByteLevel(), PreTokenizer) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_has_alphabet(self): assert isinstance(ByteLevel.alphabet(), list) assert len(ByteLevel.alphabet()) == 256 def test_can_modify(self): pretok = ByteLevel(add_prefix_space=False) assert pretok.add_prefix_space == False # Modify these pretok.add_prefix_space = True assert pretok.add_prefix_space == True def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestSplit: def test_instantiate(self): pre_tokenizer = Split(pattern=" ", behavior="removed") assert pre_tokenizer is not None assert isinstance(pre_tokenizer, PreTokenizer) assert isinstance(pre_tokenizer, Split) assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed"))), Split) # test with invert=True pre_tokenizer_with_invert = Split(pattern=" ", behavior="isolated", invert=True) assert pre_tokenizer_with_invert is not None assert isinstance(pre_tokenizer_with_invert, PreTokenizer) assert isinstance(pre_tokenizer_with_invert, Split) assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed", True))), Split) class TestWhitespace: def test_instantiate(self): assert Whitespace() is not None assert isinstance(Whitespace(), PreTokenizer) assert isinstance(Whitespace(), Whitespace) assert isinstance(pickle.loads(pickle.dumps(Whitespace())), Whitespace) class TestWhitespaceSplit: def test_instantiate(self): assert WhitespaceSplit() is not None assert isinstance(WhitespaceSplit(), PreTokenizer) assert isinstance(WhitespaceSplit(), WhitespaceSplit) assert isinstance(pickle.loads(pickle.dumps(WhitespaceSplit())), WhitespaceSplit) class TestBertPreTokenizer: def test_instantiate(self): assert BertPreTokenizer() is not None assert isinstance(BertPreTokenizer(), PreTokenizer) assert isinstance(BertPreTokenizer(), BertPreTokenizer) assert isinstance(pickle.loads(pickle.dumps(BertPreTokenizer())), BertPreTokenizer) class TestMetaspace: def test_instantiate(self): assert Metaspace() is not None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(add_prefix_space=True) is not None assert isinstance(Metaspace(), PreTokenizer) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_can_modify(self): pretok = Metaspace(replacement="$", add_prefix_space=False) assert pretok.replacement == "$" assert pretok.add_prefix_space == False # Modify these pretok.replacement = "%" assert pretok.replacement == "%" pretok.add_prefix_space = True assert pretok.add_prefix_space == True pretok.prepend_scheme = "never" assert pretok.prepend_scheme == "never" class TestCharDelimiterSplit: def test_instantiate(self): assert CharDelimiterSplit("-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): CharDelimiterSplit("") assert isinstance(CharDelimiterSplit(" "), 
PreTokenizer) assert isinstance(CharDelimiterSplit(" "), CharDelimiterSplit) assert isinstance(pickle.loads(pickle.dumps(CharDelimiterSplit("-"))), CharDelimiterSplit) def test_can_modify(self): pretok = CharDelimiterSplit("@") assert pretok.delimiter == "@" # Modify these pretok.delimiter = "!" assert pretok.delimiter == "!" class TestPunctuation: def test_instantiate(self): assert Punctuation() is not None assert Punctuation("removed") is not None assert isinstance(Punctuation(), PreTokenizer) assert isinstance(Punctuation(), Punctuation) assert isinstance(pickle.loads(pickle.dumps(Punctuation())), Punctuation) class TestSequence: def test_instantiate(self): assert Sequence([]) is not None assert isinstance(Sequence([]), PreTokenizer) assert isinstance(Sequence([]), Sequence) dumped = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(dumped), Sequence) def test_bert_like(self): pre_tokenizer = Sequence([WhitespaceSplit(), Punctuation()]) assert isinstance(Sequence([]), PreTokenizer) assert isinstance(Sequence([]), Sequence) assert isinstance(pickle.loads(pickle.dumps(pre_tokenizer)), Sequence) result = pre_tokenizer.pre_tokenize_str("Hey friend! How are you?!?") assert result == [ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] class TestDigits: def test_instantiate(self): assert Digits() is not None assert isinstance(Digits(), PreTokenizer) assert isinstance(Digits(), Digits) assert isinstance(Digits(True), Digits) assert isinstance(Digits(False), Digits) assert isinstance(pickle.loads(pickle.dumps(Digits())), Digits) def test_can_modify(self): pretok = Digits(individual_digits=False) assert pretok.individual_digits == False # Modify these pretok.individual_digits = True assert pretok.individual_digits == True class TestUnicodeScripts: def test_instantiate(self): assert UnicodeScripts() is not None assert isinstance(UnicodeScripts(), PreTokenizer) assert isinstance(UnicodeScripts(), UnicodeScripts) assert isinstance(pickle.loads(pickle.dumps(UnicodeScripts())), UnicodeScripts) class TestCustomPreTokenizer: class BadCustomPretok: def pre_tokenize(self, pretok, wrong): # This method does not have the right signature: it takes one too many arg pass class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) def test_instantiate(self): bad = PreTokenizer.custom(TestCustomPreTokenizer.BadCustomPretok()) good = PreTokenizer.custom(TestCustomPreTokenizer.GoodCustomPretok()) assert isinstance(bad, PreTokenizer) assert isinstance(good, PreTokenizer) with pytest.raises(Exception, match="TypeError:.*pre_tokenize()"): bad.pre_tokenize_str("Hey there!") assert good.pre_tokenize_str("Hey there!") == [ ("Hey there!", (0, 10)), ("Hey there!", (0, 10)), ] def test_camel_case(self): class CamelCasePretok: def get_state(self, c): if c.islower(): return "lower" elif c.isupper(): return "upper" elif c.isdigit(): return "digit" else: return "rest" def split(self, n, normalized): i = 0 # states = {"any", "lower", "upper", "digit", "rest"} state = "any" pieces = [] for j, c in enumerate(normalized.normalized): c_state = self.get_state(c) if state == "any": state = c_state if state != "rest" and state == c_state: pass elif state == "upper" and c_state == "lower": pass else: 
pieces.append(normalized[i:j]) i = j state = c_state pieces.append(normalized[i:]) return pieces def pre_tokenize(self, pretok): pretok.split(self.split) camel = PreTokenizer.custom(CamelCasePretok()) assert camel.pre_tokenize_str("HeyThere!?-ThisIsLife") == [ ("Hey", (0, 3)), ("There", (3, 8)), ("!", (8, 9)), ("?", (9, 10)), ("-", (10, 11)), ("This", (11, 15)), ("Is", (15, 17)), ("Life", (17, 21)), ]
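For a quick feel of the built-ins exercised above, a short sketch combining two of them; the offsets refer back to the original string:

from tokenizers.pre_tokenizers import Digits, Sequence, Whitespace

pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911 now"))
# [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('now', (9, 12))]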
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_encoding.py
import pytest

from tokenizers import BertWordPieceTokenizer

from ..utils import bert_files, data_dir


class TestEncoding:
    @pytest.fixture(scope="class")
    def encodings(self, bert_files):
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
        single_encoding = tokenizer.encode("I love HuggingFace")
        pair_encoding = tokenizer.encode("I love HuggingFace", "Do you?")
        return single_encoding, pair_encoding

    def test_sequence_ids(self, encodings):
        single, pair = encodings

        assert single.sequence_ids == [None, 0, 0, 0, 0, None]
        assert pair.sequence_ids == [None, 0, 0, 0, 0, None, 1, 1, 1, None]

    def test_n_sequences(self, encodings):
        single, pair = encodings

        assert single.n_sequences == 1
        assert pair.n_sequences == 2

    def test_word_to_tokens(self, encodings):
        single, pair = encodings

        assert single.tokens == ["[CLS]", "i", "love", "hugging", "##face", "[SEP]"]
        assert single.word_to_tokens(0) == (1, 2)

        assert pair.tokens == [
            "[CLS]",
            "i",
            "love",
            "hugging",
            "##face",
            "[SEP]",
            "do",
            "you",
            "?",
            "[SEP]",
        ]
        assert pair.word_to_tokens(0) == (1, 2)
        assert pair.word_to_tokens(0, 0) == (1, 2)
        assert pair.word_to_tokens(6, 0) == None
        assert pair.word_to_tokens(0, 1) == (6, 7)

    def test_word_to_chars(self, encodings):
        single, pair = encodings

        assert single.word_to_chars(2) == (7, 18)
        assert pair.word_to_chars(2) == (7, 18)
        assert pair.word_to_chars(2, 0) == (7, 18)
        assert pair.word_to_chars(2, 1) == (6, 7)

    def test_token_to_sequence(self, encodings):
        single, pair = encodings

        assert single.token_to_sequence(2) == 0
        assert pair.token_to_sequence(2) == 0
        assert pair.token_to_sequence(0) == None
        assert pair.token_to_sequence(5) == None
        assert pair.token_to_sequence(6) == 1
        assert pair.token_to_sequence(8) == 1
        assert pair.token_to_sequence(9) == None
        assert pair.token_to_sequence(1200) == None

    def test_token_to_chars(self, encodings):
        single, pair = encodings

        assert single.token_to_chars(0) == None
        assert single.token_to_chars(2) == (2, 6)
        assert pair.token_to_chars(2) == (2, 6)
        assert pair.token_to_chars(5) == None
        assert pair.token_to_chars(6) == (0, 2)

    def test_token_to_word(self, encodings):
        single, pair = encodings

        assert single.token_to_word(0) == None
        assert single.token_to_word(1) == 0
        assert single.token_to_word(4) == 2
        assert pair.token_to_word(1) == 0
        assert pair.token_to_word(4) == 2
        assert pair.token_to_word(5) == None
        assert pair.token_to_word(6) == 0
        assert pair.token_to_word(7) == 1

    def test_char_to_token(self, encodings):
        single, pair = encodings

        assert single.char_to_token(0) == 1
        assert pair.char_to_token(0) == 1
        assert pair.char_to_token(0, 0) == 1
        assert pair.char_to_token(1, 0) == None
        assert pair.char_to_token(0, 1) == 6
        assert pair.char_to_token(2, 1) == None

    def test_char_to_word(self, encodings):
        single, pair = encodings

        assert single.char_to_word(0) == 0
        assert single.char_to_word(1) == None
        assert pair.char_to_word(2) == 1
        assert pair.char_to_word(2, 0) == 1
        assert pair.char_to_word(2, 1) == None
        assert pair.char_to_word(3, 1) == 1

    def test_truncation(self, encodings):
        single, _ = encodings

        single.truncate(2, 1, "right")
        assert single.tokens == ["[CLS]", "i"]
        assert single.overflowing[0].tokens == ["i", "love"]

    def test_invalid_truncate_direction(self, encodings):
        single, _ = encodings

        with pytest.raises(ValueError) as excinfo:
            single.truncate(2, 1, "not_a_direction")
        assert "Invalid truncation direction value : not_a_direction" == str(excinfo.value)
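A small helper sketch built on the same offset-mapping methods tested above; it works with any Encoding together with its source text:

def tokens_with_spans(encoding, text):
    """Pair each token with the exact substring of `text` it covers."""
    pairs = []
    for i, token in enumerate(encoding.tokens):
        span = encoding.token_to_chars(i)
        if span is not None:  # special tokens like [CLS]/[SEP] have no span
            pairs.append((token, text[span[0] : span[1]]))
    return pairs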
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_normalizers.py
import pickle

import pytest

from tokenizers import NormalizedString, Tokenizer
from tokenizers.models import BPE
from tokenizers.normalizers import BertNormalizer, Lowercase, Normalizer, Sequence, Strip, Prepend


class TestBertNormalizer:
    def test_instantiate(self):
        assert isinstance(BertNormalizer(), Normalizer)
        assert isinstance(BertNormalizer(), BertNormalizer)
        assert isinstance(pickle.loads(pickle.dumps(BertNormalizer())), BertNormalizer)

    def test_strip_accents(self):
        normalizer = BertNormalizer(strip_accents=True, lowercase=False, handle_chinese_chars=False, clean_text=False)

        output = normalizer.normalize_str("Héllò")
        assert output == "Hello"

    def test_handle_chinese_chars(self):
        normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=True, clean_text=False)

        # Each CJK character gets padded with a space on both sides
        output = normalizer.normalize_str("你好")
        assert output == " 你  好 "

    def test_clean_text(self):
        normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=False, clean_text=True)

        output = normalizer.normalize_str("\ufeffHello")
        assert output == "Hello"

    def test_lowercase(self):
        normalizer = BertNormalizer(strip_accents=False, lowercase=True, handle_chinese_chars=False, clean_text=False)

        output = normalizer.normalize_str("Héllò")
        assert output == "héllò"

    def test_can_modify(self):
        normalizer = BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True)

        assert normalizer.clean_text == True
        assert normalizer.handle_chinese_chars == True
        assert normalizer.strip_accents == True
        assert normalizer.lowercase == True

        # Modify these
        normalizer.clean_text = False
        assert normalizer.clean_text == False
        normalizer.handle_chinese_chars = False
        assert normalizer.handle_chinese_chars == False
        normalizer.strip_accents = None
        assert normalizer.strip_accents == None
        normalizer.lowercase = False
        assert normalizer.lowercase == False


class TestSequence:
    def test_instantiate(self):
        assert isinstance(Sequence([]), Normalizer)
        assert isinstance(Sequence([]), Sequence)
        assert isinstance(pickle.loads(pickle.dumps(Sequence([]))), Sequence)

    def test_can_make_sequences(self):
        normalizer = Sequence([Lowercase(), Strip()])

        output = normalizer.normalize_str(" HELLO ")
        assert output == "hello"


class TestLowercase:
    def test_instantiate(self):
        assert isinstance(Lowercase(), Normalizer)
        assert isinstance(Lowercase(), Lowercase)
        assert isinstance(pickle.loads(pickle.dumps(Lowercase())), Lowercase)

    def test_lowercase(self):
        normalizer = Lowercase()

        output = normalizer.normalize_str("HELLO")
        assert output == "hello"


class TestStrip:
    def test_instantiate(self):
        assert isinstance(Strip(), Normalizer)
        assert isinstance(Strip(), Strip)
        assert isinstance(pickle.loads(pickle.dumps(Strip())), Strip)

    def test_left_strip(self):
        normalizer = Strip(left=True, right=False)

        output = normalizer.normalize_str(" hello ")
        assert output == "hello "

    def test_right_strip(self):
        normalizer = Strip(left=False, right=True)

        output = normalizer.normalize_str(" hello ")
        assert output == " hello"

    def test_full_strip(self):
        normalizer = Strip(left=True, right=True)

        output = normalizer.normalize_str(" hello ")
        assert output == "hello"

    def test_can_modify(self):
        normalizer = Strip(left=True, right=True)

        assert normalizer.left == True
        assert normalizer.right == True

        # Modify these
        normalizer.left = False
        assert normalizer.left == False
        normalizer.right = False
        assert normalizer.right == False


class TestPrepend:
    def test_instantiate(self):
        assert isinstance(Prepend("▁"), Normalizer)
        assert isinstance(Prepend("▁"), Prepend)
        assert isinstance(pickle.loads(pickle.dumps(Prepend("▁"))), Prepend)

    def test_prepend(self):
        normalizer = Prepend(prepend="▁")

        output = normalizer.normalize_str("hello")
        assert output == "▁hello"

    def test_can_modify(self):
        normalizer = Prepend("▁")

        assert normalizer.prepend == "▁"

        # Modify these
        normalizer.prepend = "-"
        assert normalizer.prepend == "-"


class TestCustomNormalizer:
    class BadCustomNormalizer:
        def normalize(self, normalized, wrong):
            pass

    class GoodCustomNormalizer:
        def normalize(self, normalized):
            self.kept_normalized = normalized
            normalized.replace("there", "you")

        def use_after_normalize(self):
            self.kept_normalized.replace("something", "else")

    def test_instantiate(self):
        bad = Normalizer.custom(TestCustomNormalizer.BadCustomNormalizer())
        good_custom = TestCustomNormalizer.GoodCustomNormalizer()
        good = Normalizer.custom(good_custom)

        assert isinstance(bad, Normalizer)
        assert isinstance(good, Normalizer)
        with pytest.raises(Exception, match="TypeError:.*normalize()"):
            bad.normalize_str("Hey there!")
        assert good.normalize_str("Hey there!") == "Hey you!"

        with pytest.raises(Exception, match="Cannot use a NormalizedStringRefMut outside `normalize`"):
            good_custom.use_after_normalize()

    def test_normalizer_interface(self):
        normalizer = Normalizer.custom(TestCustomNormalizer.GoodCustomNormalizer())

        normalized = NormalizedString("Hey there!")
        normalizer.normalize(normalized)

        assert repr(normalized) == 'NormalizedString(original="Hey there!", normalized="Hey you!")'
        assert str(normalized) == "Hey you!"
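# A minimal sketch (not part of the original tests) of a custom normalizer
# that chains in-place NormalizedString operations. Custom normalizers mutate
# the NormalizedString they receive rather than returning a new string, which
# is what keeps offset tracking back to the original text intact.
class SketchNormalizer:
    def normalize(self, normalized):
        normalized.nfkc()       # unicode-normalize in place
        normalized.lowercase()  # offsets to the original text are preserved

# Hypothetical usage, wrapped so it can be assigned to `tokenizer.normalizer`:
# sketch = Normalizer.custom(SketchNormalizer())
# assert sketch.normalize_str("Héllò") == "héllò"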
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_quicktour.py
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

from ..utils import data_dir, doc_wiki_tokenizer


disable_printing = True
original_print = print


def print(*args, **kwargs):
    if not disable_printing:
        original_print(*args, **kwargs)


class TestQuicktour:
    # This method contains everything we don't want to run
    @staticmethod
    def slow_train():
        tokenizer, trainer = TestQuicktour.get_tokenizer_trainer()

        # START train
        files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
        tokenizer.train(files, trainer)
        # END train
        # START save
        tokenizer.save("data/tokenizer-wiki.json")
        # END save

    @staticmethod
    def get_tokenizer_trainer():
        # START init_tokenizer
        from tokenizers import Tokenizer
        from tokenizers.models import BPE

        tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
        # END init_tokenizer
        # START init_trainer
        from tokenizers.trainers import BpeTrainer

        trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
        # END init_trainer
        # START init_pretok
        from tokenizers.pre_tokenizers import Whitespace

        tokenizer.pre_tokenizer = Whitespace()
        # END init_pretok
        return tokenizer, trainer

    def test_quicktour(self, doc_wiki_tokenizer):
        def print(*args, **kwargs):
            pass

        try:
            # START reload_tokenizer
            tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
            # END reload_tokenizer
        except Exception:
            tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)

        # START encode
        output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
        # END encode
        # START print_tokens
        print(output.tokens)
        # ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
        # END print_tokens
        assert output.tokens == [
            "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",
        ]
        # START print_ids
        print(output.ids)
        # [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
        # END print_ids
        assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
        # START print_offsets
        print(output.offsets[9])
        # (26, 27)
        # END print_offsets
        assert output.offsets[9] == (26, 27)
        # START use_offsets
        sentence = "Hello, y'all! How are you 😁 ?"
        sentence[26:27]
        # "😁"
        # END use_offsets
        assert sentence[26:27] == "😁"
        # START check_sep
        tokenizer.token_to_id("[SEP]")
        # 2
        # END check_sep
        assert tokenizer.token_to_id("[SEP]") == 2
        # START init_template_processing
        from tokenizers.processors import TemplateProcessing

        tokenizer.post_processor = TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[
                ("[CLS]", tokenizer.token_to_id("[CLS]")),
                ("[SEP]", tokenizer.token_to_id("[SEP]")),
            ],
        )
        # END init_template_processing
        # START print_special_tokens
        output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
        print(output.tokens)
        # ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
        # END print_special_tokens
        assert output.tokens == [
            "[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]",
        ]
        # START print_special_tokens_pair
        output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
        print(output.tokens)
        # ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
        # END print_special_tokens_pair
        assert output.tokens == [
            "[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]",
        ]
        # START print_type_ids
        print(output.type_ids)
        # [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        # END print_type_ids
        assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        # START encode_batch
        output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
        # END encode_batch
        # START encode_batch_pair
        output = tokenizer.encode_batch(
            [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
        )
        # END encode_batch_pair
        # START enable_padding
        tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
        # END enable_padding
        # START print_batch_tokens
        output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
        print(output[1].tokens)
        # ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
        # END print_batch_tokens
        assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
        # START print_attention_mask
        print(output[1].attention_mask)
        # [1, 1, 1, 1, 1, 1, 1, 0]
        # END print_attention_mask
        assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0]


if __name__ == "__main__":
    import os
    from urllib import request
    from zipfile import ZipFile

    disable_printing = False
    if not os.path.isdir("data/wikitext-103-raw"):
        print("Downloading wikitext-103...")
        wiki_text, _ = request.urlretrieve(
            "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
        )
        with ZipFile(wiki_text, "r") as z:
            print("Unzipping in data...")
            z.extractall("data")

    print("Now training...")
    TestQuicktour.slow_train()
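# A minimal sketch (not part of the quicktour itself) of capping sequence
# length alongside the padding shown above; enable_truncation is the
# counterpart of enable_padding, and max_length=8 is an arbitrary example.
# tokenizer.enable_truncation(max_length=8)
# output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
# Roughly: output is capped at max_length tokens, and the cut-off remainder
# is kept as extra Encoding objects in output.overflowing.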
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
import gzip
import os

import datasets
import pytest

from ..utils import data_dir, train_files


class TestTrainFromIterators:
    @staticmethod
    def get_tokenizer_trainer():
        # START init_tokenizer_trainer
        from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers

        tokenizer = Tokenizer(models.Unigram())
        tokenizer.normalizer = normalizers.NFKC()
        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
        tokenizer.decoder = decoders.ByteLevel()

        trainer = trainers.UnigramTrainer(
            vocab_size=20000,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
            special_tokens=["<PAD>", "<BOS>", "<EOS>"],
        )
        # END init_tokenizer_trainer
        trainer.show_progress = False
        return tokenizer, trainer

    @staticmethod
    def load_dummy_dataset():
        # START load_dataset
        import datasets

        dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation")
        # END load_dataset

    @pytest.fixture(scope="class")
    def setup_gzip_files(self, train_files):
        with open(train_files["small"], "rt") as small:
            for n in range(3):
                path = f"data/my-file.{n}.gz"
                with gzip.open(path, "wt") as f:
                    f.write(small.read())

    def test_train_basic(self):
        tokenizer, trainer = self.get_tokenizer_trainer()

        # START train_basic
        # First few lines of the "Zen of Python" https://www.python.org/dev/peps/pep-0020/
        data = [
            "Beautiful is better than ugly.",
            "Explicit is better than implicit.",
            "Simple is better than complex.",
            "Complex is better than complicated.",
            "Flat is better than nested.",
            "Sparse is better than dense.",
            "Readability counts.",
        ]
        tokenizer.train_from_iterator(data, trainer=trainer)
        # END train_basic

    def test_datasets(self):
        tokenizer, trainer = self.get_tokenizer_trainer()

        # In order to keep tests fast, we only use the first 100 examples
        os.environ["TOKENIZERS_PARALLELISM"] = "true"
        dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train[0:100]")

        # START def_batch_iterator
        def batch_iterator(batch_size=1000):
            # Only keep the text column to avoid decoding the rest of the columns unnecessarily
            tok_dataset = dataset.select_columns("text")
            for batch in tok_dataset.iter(batch_size):
                yield batch["text"]

        # END def_batch_iterator

        # START train_datasets
        tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))
        # END train_datasets

    def test_gzip(self, setup_gzip_files):
        tokenizer, trainer = self.get_tokenizer_trainer()

        # START single_gzip
        import gzip

        with gzip.open("data/my-file.0.gz", "rt") as f:
            tokenizer.train_from_iterator(f, trainer=trainer)
        # END single_gzip

        # START multi_gzip
        files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]

        def gzip_iterator():
            for path in files:
                with gzip.open(path, "rt") as f:
                    for line in f:
                        yield line

        tokenizer.train_from_iterator(gzip_iterator(), trainer=trainer)
        # END multi_gzip
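# A minimal sketch (file names are hypothetical) generalizing the gzip
# iterator above to plain-text corpora: stream lines lazily so the whole
# corpus never has to fit in memory, and pass `length` so the progress bar
# still knows the total number of examples.
def text_file_iterator(paths):
    for path in paths:
        with open(path, "rt", encoding="utf-8") as f:
            for line in f:
                yield line

# corpus = ["data/part-0.txt", "data/part-1.txt"]  # placeholder files
# total = sum(1 for path in corpus for _ in open(path, encoding="utf-8"))
# tokenizer.train_from_iterator(text_file_iterator(corpus), trainer=trainer, length=total)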
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_pipeline.py
from tokenizers import Tokenizer

from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer


disable_printing = True
original_print = print


def print(*args, **kwargs):
    if not disable_printing:
        original_print(*args, **kwargs)


class TestPipeline:
    def test_pipeline(self, doc_wiki_tokenizer):
        try:
            # START reload_tokenizer
            from tokenizers import Tokenizer

            tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
            # END reload_tokenizer
        except Exception:
            tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)

        # START setup_normalizer
        from tokenizers import normalizers
        from tokenizers.normalizers import NFD, StripAccents

        normalizer = normalizers.Sequence([NFD(), StripAccents()])
        # END setup_normalizer
        # START test_normalizer
        normalizer.normalize_str("Héllò hôw are ü?")
        # "Hello how are u?"
        # END test_normalizer
        assert normalizer.normalize_str("Héllò hôw are ü?") == "Hello how are u?"
        # START replace_normalizer
        tokenizer.normalizer = normalizer
        # END replace_normalizer
        # START setup_pre_tokenizer
        from tokenizers.pre_tokenizers import Whitespace

        pre_tokenizer = Whitespace()
        pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.")
        # [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)),
        #  ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ("fine", (24, 28)),
        #  (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40))]
        # END setup_pre_tokenizer
        assert pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") == [
            ("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)),
            ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ("m", (22, 23)), ("fine", (24, 28)),
            (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40)),
        ]
        # START combine_pre_tokenizer
        from tokenizers import pre_tokenizers
        from tokenizers.pre_tokenizers import Digits

        pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
        pre_tokenizer.pre_tokenize_str("Call 911!")
        # [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))]
        # END combine_pre_tokenizer
        assert pre_tokenizer.pre_tokenize_str("Call 911!") == [
            ("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9)),
        ]
        # START replace_pre_tokenizer
        tokenizer.pre_tokenizer = pre_tokenizer
        # END replace_pre_tokenizer
        # START setup_processor
        from tokenizers.processors import TemplateProcessing

        tokenizer.post_processor = TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
        )
        # END setup_processor
        # START test_decoding
        output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
        print(output.ids)
        # [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]

        tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2])
        # "Hello , y ' all ! How are you ?"
        # END test_decoding
        assert output.ids == [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
        assert (
            tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2])
            == "Hello , y ' all ! How are you ?"
        )

    @staticmethod
    def slow_train():
        # START bert_setup_tokenizer
        from tokenizers import Tokenizer
        from tokenizers.models import WordPiece

        bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        # END bert_setup_tokenizer
        # START bert_setup_normalizer
        from tokenizers import normalizers
        from tokenizers.normalizers import NFD, Lowercase, StripAccents

        bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
        # END bert_setup_normalizer
        # START bert_setup_pre_tokenizer
        from tokenizers.pre_tokenizers import Whitespace

        bert_tokenizer.pre_tokenizer = Whitespace()
        # END bert_setup_pre_tokenizer
        # START bert_setup_processor
        from tokenizers.processors import TemplateProcessing

        bert_tokenizer.post_processor = TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[
                ("[CLS]", 1),
                ("[SEP]", 2),
            ],
        )
        # END bert_setup_processor
        # START bert_train_tokenizer
        from tokenizers.trainers import WordPieceTrainer

        trainer = WordPieceTrainer(vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
        files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
        bert_tokenizer.train(files, trainer)

        bert_tokenizer.save("data/bert-wiki.json")
        # END bert_train_tokenizer

    def test_bert_example(self, doc_pipeline_bert_tokenizer):
        try:
            bert_tokenizer = Tokenizer.from_file("data/bert-wiki.json")
        except Exception:
            bert_tokenizer = Tokenizer.from_file(doc_pipeline_bert_tokenizer)

        # START bert_test_decoding
        output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.")
        print(output.tokens)
        # ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]

        bert_tokenizer.decode(output.ids)
        # "welcome to the tok ##eni ##zer ##s library ."
        # END bert_test_decoding
        assert bert_tokenizer.decode(output.ids) == "welcome to the tok ##eni ##zer ##s library ."
        # START bert_proper_decoding
        from tokenizers import decoders

        bert_tokenizer.decoder = decoders.WordPiece()
        bert_tokenizer.decode(output.ids)
        # "welcome to the tokenizers library."
        # END bert_proper_decoding
        assert bert_tokenizer.decode(output.ids) == "welcome to the tokenizers library."


if __name__ == "__main__":
    import os
    from urllib import request
    from zipfile import ZipFile

    disable_printing = False
    if not os.path.isdir("data/wikitext-103-raw"):
        print("Downloading wikitext-103...")
        wiki_text, _ = request.urlretrieve(
            "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
        )
        with ZipFile(wiki_text, "r") as z:
            print("Unzipping in data...")
            z.extractall("data")

    print("Now training...")
    TestPipeline.slow_train()
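# A minimal sketch (not part of the original test) tying the pipeline stages
# together on a fresh Tokenizer: every stage is just an attribute, so each
# can be swapped independently of the others. Component choices here are
# illustrative, not the ones trained above.
def build_minimal_pipeline():
    from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers
    from tokenizers.models import WordPiece

    tok = Tokenizer(WordPiece(unk_token="[UNK]"))
    tok.normalizer = normalizers.Lowercase()         # 1. normalization
    tok.pre_tokenizer = pre_tokenizers.Whitespace()  # 2. pre-tokenization (model = stage 3)
    tok.decoder = decoders.WordPiece()               # 4. decoding
    return tok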
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py
import pytest

from tokenizers import BertWordPieceTokenizer

from ..utils import bert_files, data_dir, multiprocessing_with_parallelism


class TestBertWordPieceTokenizer:
    def test_basic_encode(self, bert_files):
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])

        # Encode with special tokens by default
        output = tokenizer.encode("My name is John", "pair")
        assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
        assert output.tokens == [
            "[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]",
        ]
        assert output.offsets == [
            (0, 0), (0, 2), (3, 7), (8, 10), (11, 15), (0, 0), (0, 4), (0, 0),
        ]
        assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]

        # Can encode without the special tokens
        output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
        assert output.ids == [2026, 2171, 2003, 2198, 3940]
        assert output.tokens == ["my", "name", "is", "john", "pair"]
        assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
        assert output.type_ids == [0, 0, 0, 0, 1]

    def test_multiprocessing_with_parallelism(self, bert_files):
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
        multiprocessing_with_parallelism(tokenizer, False)
        multiprocessing_with_parallelism(tokenizer, True)

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = BertWordPieceTokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)
        output = tokenizer.encode("A sentence")
        assert output.tokens == ["a", "sentence"]
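# A minimal sketch (helper name is ours) of reading the offsets above back
# against the raw input: special tokens report the empty span (0, 0), so
# filtering those out leaves only spans of the original text. Assumes a
# single-sequence encoding, since pair offsets refer to each sequence's own text.
def original_spans(text, encoding):
    # Keep (token, substring) pairs for tokens that cover real characters.
    return [
        (token, text[start:end])
        for token, (start, end) in zip(encoding.tokens, encoding.offsets)
        if (start, end) != (0, 0)
    ]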
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_sentencepiece.py
import os

import pytest

from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer


class TestSentencePieceBPE:
    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceBPETokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)
        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁sentence"]


class TestSentencePieceUnigram:
    def test_train(self, tmpdir):
        p = tmpdir.mkdir("tmpdir").join("file.txt")
        p.write("A first sentence\nAnother sentence\nAnd a last one")

        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(files=str(p), show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]

        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
        assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"

    def test_train_with_unk_token(self, tmpdir):
        p = tmpdir.mkdir("tmpdir").join("file.txt")
        p.write("A first sentence\nAnother sentence\nAnd a last one")

        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>")
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)
        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]

        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
        assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"

    def test_train_from_iterator_with_unk_token(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train_from_iterator(
            text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
        )
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
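# A minimal sketch (helper name is ours, not from the library) wrapping the
# failure mode tested above: a Unigram model trained without `unk_token`
# raises on out-of-vocabulary characters instead of degrading silently.
def safe_encode(tokenizer, text):
    try:
        return tokenizer.encode(text)
    except Exception:
        # Without an unk_id the model cannot represent unseen characters;
        # callers can fall back to cleaning the text, or retrain with
        # special_tokens=["<unk>"], unk_token="<unk>" as in the tests above.
        return None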
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_char_bpe.py
import pytest

from tokenizers import CharBPETokenizer

from ..utils import data_dir, multiprocessing_with_parallelism, openai_files


class TestCharBPETokenizer:
    def test_basic_encode(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
        output = tokenizer.encode("My name is John", "pair")

        assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
        assert output.tokens == [
            "<unk>", "y</w>", "name</w>", "is</w>", "<unk>", "o", "hn</w>", "pair</w>",
        ]
        assert output.offsets == [
            (0, 1), (1, 2), (3, 7), (8, 10), (11, 12), (12, 13), (13, 15), (0, 4),
        ]
        assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]

    def test_lowercase(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
        output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
        assert output.ids == [547, 1362, 544, 2476, 2688]
        assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"]
        assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
        assert output.type_ids == [0, 0, 0, 0, 1]

    def test_decoding(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
        decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
        assert decoded == "my name is john"

    def test_multiprocessing_with_parallelism(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
        multiprocessing_with_parallelism(tokenizer, False)
        multiprocessing_with_parallelism(tokenizer, True)

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = CharBPETokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)
        output = tokenizer.encode("A sentence")
        assert output.tokens == ["A</w>", "sentence</w>"]
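# A minimal sketch (not from the original file) of why "My" becomes
# ["<unk>", "y</w>"] in the first test: the OpenAI-style vocab used by the
# fixture is lowercase-only, so the uppercase "M" is unknown, while the
# "</w>" suffix marks word-final subwords, which is what lets decoding
# re-insert spaces between words. A manual detokenization looks like:
# tokens = ["my</w>", "name</w>"]
# "".join(t[:-len("</w>")] + " " if t.endswith("</w>") else t for t in tokens).strip()
# -> "my name"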
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py
import pytest

from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
from tokenizers.implementations import BaseTokenizer


class TestBaseTokenizer:
    def test_get_set_components(self):
        toki = Tokenizer(models.BPE())
        toki.normalizer = normalizers.NFC()
        toki.pre_tokenizer = pre_tokenizers.ByteLevel()
        toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1))
        toki.decoder = decoders.ByteLevel()

        tokenizer = BaseTokenizer(toki)

        assert isinstance(tokenizer.model, models.BPE)
        assert isinstance(tokenizer.normalizer, normalizers.NFC)
        assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel)
        assert isinstance(tokenizer.post_processor, processors.BertProcessing)
        assert isinstance(tokenizer.decoder, decoders.ByteLevel)

        tokenizer.model = models.Unigram()
        assert isinstance(tokenizer.model, models.Unigram)
        tokenizer.normalizer = normalizers.NFD()
        assert isinstance(tokenizer.normalizer, normalizers.NFD)
        tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
        assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace)
        tokenizer.post_processor = processors.ByteLevel()
        assert isinstance(tokenizer.post_processor, processors.ByteLevel)
        tokenizer.decoder = decoders.WordPiece()
        assert isinstance(tokenizer.decoder, decoders.WordPiece)
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py
import pytest

from tokenizers import ByteLevelBPETokenizer

from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files


class TestByteLevelBPE:
    def test_basic_encode(self, roberta_files):
        tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
        output = tokenizer.encode("The quick brown fox jumps over the lazy dog")

        assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
        assert output.tokens == [
            "The", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog",
        ]
        assert output.offsets == [
            (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43),
        ]

    def test_add_prefix_space(self, roberta_files):
        tokenizer = ByteLevelBPETokenizer.from_file(
            roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
        )
        output = tokenizer.encode("The quick brown fox jumps over the lazy dog")

        assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
        assert output.tokens == [
            "ĠThe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog",
        ]
        assert output.offsets == [
            (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43),
        ]

    def test_lowerspace(self, roberta_files):
        tokenizer = ByteLevelBPETokenizer.from_file(
            roberta_files["vocab"],
            roberta_files["merges"],
            add_prefix_space=True,
            lowercase=True,
        )
        output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")

        assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
        assert output.tokens == [
            "Ġthe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog",
        ]

    def test_multiprocessing_with_parallelism(self, roberta_files):
        tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
        multiprocessing_with_parallelism(tokenizer, False)
        multiprocessing_with_parallelism(tokenizer, True)

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = ByteLevelBPETokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)
        output = tokenizer.encode("A sentence")
        assert output.tokens == ["A", "Ġsentence"]
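# A minimal sketch (not part of the original test) of the byte-level round
# trip: "Ġ" is the printable stand-in for a leading space byte, and since
# byte-level encoding is lossless, decoding the ids above should restore the
# exact input string, spaces included (ByteLevelBPETokenizer wires in a
# matching byte-level decoder).
# output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
# assert tokenizer.decode(output.ids) == "The quick brown fox jumps over the lazy dog"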
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/.cargo/config.toml
[target.x86_64-apple-darwin]
rustflags = [
  "-C", "link-arg=-undefined",
  "-C", "link-arg=dynamic_lookup",
  "-C", "link-arg=-mmacosx-version-min=10.11",
]

[target.aarch64-apple-darwin]
rustflags = [
  "-C", "link-arg=-undefined",
  "-C", "link-arg=dynamic_lookup",
  "-C", "link-arg=-mmacosx-version-min=10.11",
]
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/lib.rs
#![warn(clippy::all)]
#![allow(clippy::upper_case_acronyms)]
// Many false positives with pyo3, it seems: &str and &PyAny get flagged
#![allow(clippy::borrow_deref_ref)]

extern crate tokenizers as tk;

mod decoders;
mod encoding;
mod error;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod token;
mod tokenizer;
mod trainers;
mod utils;

use pyo3::prelude::*;
use pyo3::wrap_pymodule;

pub const VERSION: &str = env!("CARGO_PKG_VERSION");

// For users using multiprocessing in python, it is quite easy to fork the process running
// tokenizers, ending up with a deadlock because we internally make use of multithreading. So
// we register a callback to be called in the event of a fork so that we can warn the user.
#[cfg(target_family = "unix")]
static mut REGISTERED_FORK_CALLBACK: bool = false;

#[cfg(target_family = "unix")]
extern "C" fn child_after_fork() {
    use tk::parallelism::*;
    if has_parallelism_been_used() && !is_parallelism_configured() {
        eprintln!(
            "huggingface/tokenizers: The current process just got forked, after parallelism has \
             already been used. Disabling parallelism to avoid deadlocks..."
        );
        eprintln!("To disable this warning, you can either:");
        eprintln!(
            "\t- Avoid using `tokenizers` before the fork if possible\n\
             \t- Explicitly set the environment variable {}=(true | false)",
            ENV_VARIABLE
        );
        set_parallelism(false);
    }
}

/// Tokenizers Module
#[pymodule]
pub fn tokenizers(_py: Python, m: &PyModule) -> PyResult<()> {
    let _ = env_logger::try_init_from_env("TOKENIZERS_LOG");

    // Register the fork callback
    #[cfg(target_family = "unix")]
    unsafe {
        if !REGISTERED_FORK_CALLBACK {
            libc::pthread_atfork(None, None, Some(child_after_fork));
            REGISTERED_FORK_CALLBACK = true;
        }
    }

    m.add_class::<tokenizer::PyTokenizer>()?;
    m.add_class::<tokenizer::PyAddedToken>()?;
    m.add_class::<token::PyToken>()?;
    m.add_class::<encoding::PyEncoding>()?;
    m.add_class::<utils::PyRegex>()?;
    m.add_class::<utils::PyNormalizedString>()?;
    m.add_class::<utils::PyPreTokenizedString>()?;
    m.add_wrapped(wrap_pymodule!(models::models))?;
    m.add_wrapped(wrap_pymodule!(pre_tokenizers::pre_tokenizers))?;
    m.add_wrapped(wrap_pymodule!(decoders::decoders))?;
    m.add_wrapped(wrap_pymodule!(processors::processors))?;
    m.add_wrapped(wrap_pymodule!(normalizers::normalizers))?;
    m.add_wrapped(wrap_pymodule!(trainers::trainers))?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    Ok(())
}
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/token.rs
use pyo3::prelude::*;
use tk::Token;

#[pyclass(module = "tokenizers", name = "Token")]
#[derive(Clone)]
pub struct PyToken {
    token: Token,
}

impl From<Token> for PyToken {
    fn from(token: Token) -> Self {
        Self { token }
    }
}

impl From<PyToken> for Token {
    fn from(token: PyToken) -> Self {
        token.token
    }
}

#[pymethods]
impl PyToken {
    #[new]
    #[pyo3(text_signature = None)]
    fn new(id: u32, value: String, offsets: (usize, usize)) -> PyToken {
        Token::new(id, value, offsets).into()
    }

    #[getter]
    fn get_id(&self) -> u32 {
        self.token.id
    }

    #[getter]
    fn get_value(&self) -> &str {
        &self.token.value
    }

    #[getter]
    fn get_offsets(&self) -> (usize, usize) {
        self.token.offsets
    }

    fn as_tuple(&self) -> (u32, &str, (usize, usize)) {
        (self.token.id, &self.token.value, self.token.offsets)
    }
}
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/models.rs
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};

use crate::token::PyToken;
use crate::trainers::PyTrainer;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use serde::{Deserialize, Serialize};
use tk::models::bpe::{BpeBuilder, Merges, Vocab, BPE};
use tk::models::unigram::Unigram;
use tk::models::wordlevel::WordLevel;
use tk::models::wordpiece::{WordPiece, WordPieceBuilder};
use tk::models::ModelWrapper;
use tk::{Model, Token};
use tokenizers as tk;

use super::error::{deprecation_warning, ToPyResult};

/// Base class for all models
///
/// The model represents the actual tokenization algorithm. This is the part that
/// will contain and manage the learned vocabulary.
///
/// This class cannot be constructed directly. Please use one of the concrete models.
#[pyclass(module = "tokenizers.models", name = "Model", subclass)]
#[derive(Clone, Serialize, Deserialize)]
pub struct PyModel {
    #[serde(flatten)]
    pub model: Arc<RwLock<ModelWrapper>>,
}

impl PyModel {
    pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
        let base = self.clone();
        Ok(match *self.model.as_ref().read().unwrap() {
            ModelWrapper::BPE(_) => Py::new(py, (PyBPE {}, base))?.into_py(py),
            ModelWrapper::WordPiece(_) => Py::new(py, (PyWordPiece {}, base))?.into_py(py),
            ModelWrapper::WordLevel(_) => Py::new(py, (PyWordLevel {}, base))?.into_py(py),
            ModelWrapper::Unigram(_) => Py::new(py, (PyUnigram {}, base))?.into_py(py),
        })
    }
}

impl Model for PyModel {
    type Trainer = PyTrainer;

    fn tokenize(&self, tokens: &str) -> tk::Result<Vec<Token>> {
        self.model.read().unwrap().tokenize(tokens)
    }

    fn token_to_id(&self, token: &str) -> Option<u32> {
        self.model.read().unwrap().token_to_id(token)
    }

    fn id_to_token(&self, id: u32) -> Option<String> {
        self.model.read().unwrap().id_to_token(id)
    }

    fn get_vocab(&self) -> HashMap<String, u32> {
        self.model.read().unwrap().get_vocab()
    }

    fn get_vocab_size(&self) -> usize {
        self.model.read().unwrap().get_vocab_size()
    }

    fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> {
        self.model.read().unwrap().save(folder, name)
    }

    fn get_trainer(&self) -> Self::Trainer {
        self.model.read().unwrap().get_trainer().into()
    }
}

impl<I> From<I> for PyModel
where
    I: Into<ModelWrapper>,
{
    fn from(model: I) -> Self {
        Self {
            model: Arc::new(RwLock::new(model.into())),
        }
    }
}

#[pymethods]
impl PyModel {
    #[new]
    #[pyo3(text_signature = None)]
    fn __new__() -> Self {
        // Instantiate a default empty model. This doesn't really make sense, but we need
        // to be able to instantiate an empty model for pickle capabilities.
        PyModel {
            model: Arc::new(RwLock::new(BPE::default().into())),
        }
    }

    fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
        let data = serde_json::to_string(&self.model).map_err(|e| {
            exceptions::PyException::new_err(format!(
                "Error while attempting to pickle Model: {}",
                e
            ))
        })?;
        Ok(PyBytes::new(py, data.as_bytes()).to_object(py))
    }

    fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
        match state.extract::<&PyBytes>(py) {
            Ok(s) => {
                self.model = serde_json::from_slice(s.as_bytes()).map_err(|e| {
                    exceptions::PyException::new_err(format!(
                        "Error while attempting to unpickle Model: {}",
                        e
                    ))
                })?;
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    /// Tokenize a sequence
    ///
    /// Args:
    ///     sequence (:obj:`str`):
    ///         A sequence to tokenize
    ///
    /// Returns:
    ///     A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
    #[pyo3(text_signature = "(self, sequence)")]
    fn tokenize(&self, sequence: &str) -> PyResult<Vec<PyToken>> {
        Ok(ToPyResult(self.model.read().unwrap().tokenize(sequence))
            .into_py()?
            .into_iter()
            .map(|t| t.into())
            .collect())
    }

    /// Get the ID associated to a token
    ///
    /// Args:
    ///     token (:obj:`str`):
    ///         A token to convert to an ID
    ///
    /// Returns:
    ///     :obj:`int`: The ID associated to the token
    #[pyo3(text_signature = "(self, token)")]
    fn token_to_id(&self, token: &str) -> Option<u32> {
        self.model.read().unwrap().token_to_id(token)
    }

    /// Get the token associated to an ID
    ///
    /// Args:
    ///     id (:obj:`int`):
    ///         An ID to convert to a token
    ///
    /// Returns:
    ///     :obj:`str`: The token associated to the ID
    #[pyo3(text_signature = "(self, id)")]
    fn id_to_token(&self, id: u32) -> Option<String> {
        self.model.read().unwrap().id_to_token(id)
    }

    /// Save the current model
    ///
    /// Save the current model in the given folder, using the given prefix for the various
    /// files that will get created.
    /// Any file with the same name that already exists in this folder will be overwritten.
    ///
    /// Args:
    ///     folder (:obj:`str`):
    ///         The path to the target folder in which to save the various files
    ///
    ///     prefix (:obj:`str`, `optional`):
    ///         An optional prefix, used to prefix each file name
    ///
    /// Returns:
    ///     :obj:`List[str]`: The list of saved files
    #[pyo3(text_signature = "(self, folder, prefix)")]
    fn save<'a>(
        &self,
        py: Python<'_>,
        folder: &str,
        mut prefix: Option<&'a str>,
        name: Option<&'a str>,
    ) -> PyResult<Vec<String>> {
        if name.is_some() {
            deprecation_warning(
                py,
                "0.10.0",
                "Parameter `name` of Model.save has been renamed `prefix`",
            )?;
            if prefix.is_none() {
                prefix = name;
            }
        }

        let saved: PyResult<Vec<_>> =
            ToPyResult(self.model.read().unwrap().save(Path::new(folder), prefix)).into();

        Ok(saved?
            .into_iter()
            .map(|path| path.to_string_lossy().into_owned())
            .collect())
    }

    /// Get the associated :class:`~tokenizers.trainers.Trainer`
    ///
    /// Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
    /// :class:`~tokenizers.models.Model`.
    ///
    /// Returns:
    ///     :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
    #[pyo3(text_signature = "(self)")]
    fn get_trainer(&self, py: Python<'_>) -> PyResult<PyObject> {
        PyTrainer::from(self.model.read().unwrap().get_trainer()).get_as_subtype(py)
    }
}

/// An implementation of the BPE (Byte-Pair Encoding) algorithm
///
/// Args:
///     vocab (:obj:`Dict[str, int]`, `optional`):
///         A dictionary of string keys and their ids :obj:`{"am": 0,...}`
///
///     merges (:obj:`List[Tuple[str, str]]`, `optional`):
///         A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
///
///     cache_capacity (:obj:`int`, `optional`):
///         The number of words that the BPE cache can contain. The cache speeds up
///         the process by keeping the result of the merge operations for a number
///         of words.
///
///     dropout (:obj:`float`, `optional`):
///         A float between 0 and 1 that represents the BPE dropout to use.
///
///     unk_token (:obj:`str`, `optional`):
///         The unknown token to be used by the model.
///
///     continuing_subword_prefix (:obj:`str`, `optional`):
///         The prefix to attach to subword units that don't represent a beginning of word.
///
///     end_of_word_suffix (:obj:`str`, `optional`):
///         The suffix to attach to subword units that represent an end of word.
///
///     fuse_unk (:obj:`bool`, `optional`):
///         Whether to fuse any subsequent unknown tokens into a single one
///
///     byte_fallback (:obj:`bool`, `optional`):
///         Whether to use spm byte-fallback trick (defaults to False)
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "BPE")]
pub struct PyBPE {}

impl PyBPE {
    fn with_builder(mut builder: BpeBuilder, kwargs: Option<&PyDict>) -> PyResult<(Self, PyModel)> {
        if let Some(kwargs) = kwargs {
            for (key, value) in kwargs {
                let key: &str = key.extract()?;
                match key {
                    "cache_capacity" => builder = builder.cache_capacity(value.extract()?),
                    "dropout" => {
                        if let Some(dropout) = value.extract()? {
                            builder = builder.dropout(dropout);
                        }
                    }
                    "unk_token" => {
                        if let Some(unk) = value.extract()? {
                            builder = builder.unk_token(unk);
                        }
                    }
                    "continuing_subword_prefix" => {
                        builder = builder.continuing_subword_prefix(value.extract()?)
                    }
                    "end_of_word_suffix" => builder = builder.end_of_word_suffix(value.extract()?),
                    "fuse_unk" => builder = builder.fuse_unk(value.extract()?),
                    "byte_fallback" => builder = builder.byte_fallback(value.extract()?),
                    _ => println!("Ignored unknown kwarg option {}", key),
                };
            }
        }

        match builder.build() {
            Err(e) => Err(exceptions::PyException::new_err(format!(
                "Error while initializing BPE: {}",
                e
            ))),
            Ok(bpe) => Ok((PyBPE {}, bpe.into())),
        }
    }
}

macro_rules! getter {
    ($self: ident, $variant: ident, $($name: tt)+) => {{
        let super_ = $self.as_ref();
        let model = super_.model.read().unwrap();
        if let ModelWrapper::$variant(ref mo) = *model {
            mo.$($name)+
        } else {
            unreachable!()
        }
    }};
}

macro_rules! setter {
    ($self: ident, $variant: ident, $name: ident, $value: expr) => {{
        let super_ = $self.as_ref();
        let mut model = super_.model.write().unwrap();
        if let ModelWrapper::$variant(ref mut mo) = *model {
            mo.$name = $value;
        }
    }};
}

#[derive(FromPyObject)]
enum PyVocab<'a> {
    Vocab(Vocab),
    Filename(&'a str),
}

#[derive(FromPyObject)]
enum PyMerges<'a> {
    Merges(Merges),
    Filename(&'a str),
}

#[pymethods]
impl PyBPE {
    #[getter]
    fn get_dropout(self_: PyRef<Self>) -> Option<f32> {
        getter!(self_, BPE, dropout)
    }

    #[setter]
    fn set_dropout(self_: PyRef<Self>, dropout: Option<f32>) {
        setter!(self_, BPE, dropout, dropout);
    }

    #[getter]
    fn get_unk_token(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, BPE, unk_token.clone())
    }

    #[setter]
    fn set_unk_token(self_: PyRef<Self>, unk_token: Option<String>) {
        setter!(self_, BPE, unk_token, unk_token);
    }

    #[getter]
    fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, BPE, continuing_subword_prefix.clone())
    }

    #[setter]
    fn set_continuing_subword_prefix(
        self_: PyRef<Self>,
        continuing_subword_prefix: Option<String>,
    ) {
        setter!(
            self_,
            BPE,
            continuing_subword_prefix,
            continuing_subword_prefix
        );
    }

    #[getter]
    fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, BPE, end_of_word_suffix.clone())
    }

    #[setter]
    fn set_end_of_word_suffix(self_: PyRef<Self>, end_of_word_suffix: Option<String>) {
        setter!(self_, BPE, end_of_word_suffix, end_of_word_suffix);
    }

    #[getter]
    fn get_fuse_unk(self_: PyRef<Self>) -> bool {
        getter!(self_, BPE, fuse_unk)
    }

    #[setter]
    fn set_fuse_unk(self_: PyRef<Self>, fuse_unk: bool) {
        setter!(self_, BPE, fuse_unk, fuse_unk);
    }

    #[getter]
    fn get_byte_fallback(self_: PyRef<Self>) -> bool {
        getter!(self_, BPE, byte_fallback)
    }

    #[setter]
    fn set_byte_fallback(self_: PyRef<Self>, byte_fallback: bool) {
        setter!(self_, BPE, byte_fallback, byte_fallback);
    }

    #[new]
    #[pyo3(
        signature = (vocab=None, merges=None, **kwargs),
        text_signature = "(self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False)")]
    fn new(
        py: Python<'_>,
        vocab: Option<PyVocab>,
        merges: Option<PyMerges>,
        kwargs: Option<&PyDict>,
    ) -> PyResult<(Self, PyModel)> {
        if (vocab.is_some() && merges.is_none()) || (vocab.is_none() && merges.is_some()) {
            return Err(exceptions::PyValueError::new_err(
                "`vocab` and `merges` must both be specified",
            ));
        }

        let mut builder = BPE::builder();
        if let (Some(vocab), Some(merges)) = (vocab, merges) {
            match (vocab, merges) {
                (PyVocab::Vocab(vocab), PyMerges::Merges(merges)) => {
                    builder = builder.vocab_and_merges(vocab, merges);
                }
                (PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => {
                    deprecation_warning(
                        py,
                        "0.9.0",
                        "BPE.__init__ will not create from files anymore, try `BPE.from_file` instead",
                    )?;
                    builder =
                        builder.files(vocab_filename.to_string(), merges_filename.to_string());
                }
                _ => {
                    return Err(exceptions::PyValueError::new_err(
                        "`vocab` and `merges` must both be from memory or both be filenames",
                    ));
                }
            }
        }

        PyBPE::with_builder(builder, kwargs)
    }

    /// Read a :obj:`vocab.json` and a :obj:`merges.txt` files
    ///
    /// This method provides a way to read and parse the content of these files,
    /// returning the relevant data structures. If you want to instantiate some BPE models
    /// from memory, this method gives you the expected input from the standard files.
    ///
    /// Args:
    ///     vocab (:obj:`str`):
    ///         The path to a :obj:`vocab.json` file
    ///
    ///     merges (:obj:`str`):
    ///         The path to a :obj:`merges.txt` file
    ///
    /// Returns:
    ///     A :obj:`Tuple` with the vocab and the merges:
    ///         The vocabulary and merges loaded into memory
    #[staticmethod]
    #[pyo3(text_signature = "(vocab, merges)")]
    fn read_file(vocab: &str, merges: &str) -> PyResult<(Vocab, Merges)> {
        BPE::read_file(vocab, merges).map_err(|e| {
            exceptions::PyException::new_err(format!(
                "Error while reading vocab & merges files: {}",
                e
            ))
        })
    }

    /// Instantiate a BPE model from the given files.
    ///
    /// This method is roughly equivalent to doing::
    ///
    ///    vocab, merges = BPE.read_file(vocab_filename, merges_filename)
    ///    bpe = BPE(vocab, merges)
    ///
    /// If you don't need to keep the :obj:`vocab, merges` values lying around,
    /// this method is more optimized than manually calling
    /// :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`
    ///
    /// Args:
    ///     vocab (:obj:`str`):
    ///         The path to a :obj:`vocab.json` file
    ///
    ///     merges (:obj:`str`):
    ///         The path to a :obj:`merges.txt` file
    ///
    /// Returns:
    ///     :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
    #[classmethod]
    #[pyo3(signature = (vocab, merges, **kwargs))]
    #[pyo3(text_signature = "(cls, vocab, merges, **kwargs)")]
    fn from_file(
        _cls: &PyType,
        py: Python,
        vocab: &str,
        merges: &str,
        kwargs: Option<&PyDict>,
    ) -> PyResult<Py<Self>> {
        let (vocab, merges) = BPE::read_file(vocab, merges).map_err(|e| {
            exceptions::PyException::new_err(format!("Error while reading BPE files: {}", e))
        })?;
        Py::new(
            py,
            PyBPE::new(
                py,
                Some(PyVocab::Vocab(vocab)),
                Some(PyMerges::Merges(merges)),
                kwargs,
            )?,
        )
    }
}

/// An implementation of the WordPiece algorithm
///
/// Args:
///     vocab (:obj:`Dict[str, int]`, `optional`):
///         A dictionary of string keys and their ids :obj:`{"am": 0,...}`
///
///     unk_token (:obj:`str`, `optional`):
///         The unknown token to be used by the model.
///
///     max_input_chars_per_word (:obj:`int`, `optional`):
///         The maximum number of characters to allow in a single word.
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordPiece")]
pub struct PyWordPiece {}

impl PyWordPiece {
    fn with_builder(
        mut builder: WordPieceBuilder,
        kwargs: Option<&PyDict>,
    ) -> PyResult<(Self, PyModel)> {
        if let Some(kwargs) = kwargs {
            for (key, val) in kwargs {
                let key: &str = key.extract()?;
                match key {
                    "unk_token" => {
                        builder = builder.unk_token(val.extract()?);
                    }
                    "max_input_chars_per_word" => {
                        builder = builder.max_input_chars_per_word(val.extract()?);
                    }
                    "continuing_subword_prefix" => {
                        builder = builder.continuing_subword_prefix(val.extract()?);
                    }
                    _ => println!("Ignored unknown kwargs option {}", key),
                }
            }
        }

        match builder.build() {
            Err(e) => Err(exceptions::PyException::new_err(format!(
                "Error while initializing WordPiece: {}",
                e
            ))),
            Ok(wordpiece) => Ok((PyWordPiece {}, wordpiece.into())),
        }
    }
}

#[pymethods]
impl PyWordPiece {
    #[getter]
    fn get_unk_token(self_: PyRef<Self>) -> String {
        getter!(self_, WordPiece, unk_token.clone())
    }

    #[setter]
    fn set_unk_token(self_: PyRef<Self>, unk_token: String) {
        setter!(self_, WordPiece, unk_token, unk_token);
    }

    #[getter]
    fn get_continuing_subword_prefix(self_: PyRef<Self>) -> String {
        getter!(self_, WordPiece, continuing_subword_prefix.clone())
    }

    #[setter]
    fn set_continuing_subword_prefix(self_: PyRef<Self>, continuing_subword_prefix: String) {
        setter!(
            self_,
            WordPiece,
            continuing_subword_prefix,
            continuing_subword_prefix
        );
    }

    #[getter]
    fn get_max_input_chars_per_word(self_: PyRef<Self>) -> usize {
        getter!(self_, WordPiece, max_input_chars_per_word)
    }

    #[setter]
    fn set_max_input_chars_per_word(self_: PyRef<Self>, max: usize) {
        setter!(self_, WordPiece, max_input_chars_per_word, max);
    }

    #[new]
    #[pyo3(signature = (vocab=None, **kwargs), text_signature = "(self, vocab, unk_token, max_input_chars_per_word)")]
    fn new(
        py: Python<'_>,
        vocab: Option<PyVocab>,
        kwargs: Option<&PyDict>,
    ) -> PyResult<(Self, PyModel)> {
        let mut builder = WordPiece::builder();

        if let Some(vocab) = vocab {
            match vocab {
                PyVocab::Vocab(vocab) => {
                    builder = builder.vocab(vocab);
                }
                PyVocab::Filename(vocab_filename) => {
                    deprecation_warning(
                        py,
                        "0.9.0",
                        "WordPiece.__init__ will not create from files anymore, try `WordPiece.from_file` instead",
                    )?;
                    builder = builder.files(vocab_filename.to_string());
                }
            }
        }

        PyWordPiece::with_builder(builder, kwargs)
    }

    /// Read a :obj:`vocab.txt` file
    ///
    /// This method provides a way to read and parse the content of a standard `vocab.txt`
    /// file as used by the WordPiece Model, returning the relevant data structures. If you
    /// want to instantiate some WordPiece models from memory, this method gives you the
    /// expected input from the standard files.
    ///
    /// Args:
    ///     vocab (:obj:`str`):
    ///         The path to a :obj:`vocab.txt` file
    ///
    /// Returns:
    ///     :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
    #[staticmethod]
    #[pyo3(text_signature = "(vocab)")]
    fn read_file(vocab: &str) -> PyResult<Vocab> {
        WordPiece::read_file(vocab).map_err(|e| {
            exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e))
        })
    }

    /// Instantiate a WordPiece model from the given file
    ///
    /// This method is roughly equivalent to doing::
    ///
    ///     vocab = WordPiece.read_file(vocab_filename)
    ///     wordpiece = WordPiece(vocab)
    ///
    /// If you don't need to keep the :obj:`vocab` values lying around, this method is
    /// more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
    /// initialize a :class:`~tokenizers.models.WordPiece`
    ///
    /// Args:
    ///     vocab (:obj:`str`):
    ///         The path to a :obj:`vocab.txt` file
    ///
    /// Returns:
    ///     :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
    #[classmethod]
    #[pyo3(signature = (vocab, **kwargs))]
    #[pyo3(text_signature = "(vocab, **kwargs)")]
    fn from_file(
        _cls: &PyType,
        py: Python,
        vocab: &str,
        kwargs: Option<&PyDict>,
    ) -> PyResult<Py<Self>> {
        let vocab = WordPiece::read_file(vocab).map_err(|e| {
            exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e))
        })?;
        Py::new(
            py,
            PyWordPiece::new(py, Some(PyVocab::Vocab(vocab)), kwargs)?,
        )
    }
}

/// An implementation of the WordLevel algorithm
///
/// The simplest tokenizer model, based on mapping tokens to their corresponding ids.
///
/// Args:
///     vocab (:obj:`Dict[str, int]`, `optional`):
///         A dictionary of string keys and their ids :obj:`{"am": 0,...}`
///
///     unk_token (:obj:`str`, `optional`):
///         The unknown token to be used by the model.
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordLevel")]
pub struct PyWordLevel {}

#[pymethods]
impl PyWordLevel {
    #[getter]
    fn get_unk_token(self_: PyRef<Self>) -> String {
        getter!(self_, WordLevel, unk_token.clone())
    }

    #[setter]
    fn set_unk_token(self_: PyRef<Self>, unk_token: String) {
        setter!(self_, WordLevel, unk_token, unk_token);
    }

    #[new]
    #[pyo3(signature = (vocab=None, unk_token = None), text_signature = "(self, vocab, unk_token)")]
    fn new(
        py: Python<'_>,
        vocab: Option<PyVocab>,
        unk_token: Option<String>,
    ) -> PyResult<(Self, PyModel)> {
        let mut builder = WordLevel::builder();

        if let Some(vocab) = vocab {
            match vocab {
                PyVocab::Vocab(vocab) => {
                    builder = builder.vocab(vocab);
                }
                PyVocab::Filename(vocab_filename) => {
                    deprecation_warning(
                        py,
                        "0.9.0",
                        "WordLevel.__init__ will not create from files anymore, \
                         try `WordLevel.from_file` instead",
                    )?;
                    builder = builder.files(vocab_filename.to_string());
                }
            };
        }
        if let Some(unk_token) = unk_token {
            builder = builder.unk_token(unk_token);
        }

        Ok((
            PyWordLevel {},
            builder
                .build()
                .map_err(|e| exceptions::PyException::new_err(e.to_string()))?
                .into(),
        ))
    }

    /// Read a :obj:`vocab.json`
    ///
    /// This method provides a way to read and parse the content of a vocabulary file,
    /// returning the relevant data structures. If you want to instantiate some WordLevel models
    /// from memory, this method gives you the expected input from the standard files.
    ///
    /// Args:
    ///     vocab (:obj:`str`):
    ///         The path to a :obj:`vocab.json` file
    ///
    /// Returns:
    ///     :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
    #[staticmethod]
    #[pyo3(text_signature = "(vocab)")]
    fn read_file(vocab: &str) -> PyResult<Vocab> {
        WordLevel::read_file(vocab).map_err(|e| {
            exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e))
        })
    }

    /// Instantiate a WordLevel model from the given file
    ///
    /// This method is roughly equivalent to doing::
    ///
    ///     vocab = WordLevel.read_file(vocab_filename)
    ///     wordlevel = WordLevel(vocab)
    ///
    /// If you don't need to keep the :obj:`vocab` values lying around, this method is
    /// more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
    /// initialize a :class:`~tokenizers.models.WordLevel`
    ///
    /// Args:
    ///     vocab (:obj:`str`):
    ///         The path to a :obj:`vocab.json` file
    ///
    /// Returns:
    ///     :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
    #[classmethod]
    #[pyo3(signature = (vocab, unk_token = None))]
    #[pyo3(text_signature = "(vocab, unk_token)")]
    fn from_file(
        _cls: &PyType,
        py: Python,
        vocab: &str,
        unk_token: Option<String>,
    ) -> PyResult<Py<Self>> {
        let vocab = WordLevel::read_file(vocab).map_err(|e| {
            exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e))
        })?;
        Py::new(
            py,
            PyWordLevel::new(py, Some(PyVocab::Vocab(vocab)), unk_token)?,
        )
    }
}

/// An implementation of the Unigram algorithm
///
/// Args:
///     vocab (:obj:`List[Tuple[str, float]]`, `optional`):
///         A list of vocabulary items and their relative scores [("am", -0.2442),...]
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "Unigram")]
pub struct PyUnigram {}

#[pymethods]
impl PyUnigram {
    #[new]
    #[pyo3(text_signature = "(self, vocab, unk_id, byte_fallback)")]
    fn new(
        vocab: Option<Vec<(String, f64)>>,
        unk_id: Option<usize>,
        byte_fallback: Option<bool>,
    ) -> PyResult<(Self, PyModel)> {
        match (vocab, unk_id, byte_fallback) {
            (Some(vocab), unk_id, byte_fallback) => {
                let model =
                    Unigram::from(vocab, unk_id, byte_fallback.unwrap_or(false)).map_err(|e| {
                        exceptions::PyException::new_err(format!(
                            "Error while loading Unigram: {}",
                            e
                        ))
                    })?;
                Ok((PyUnigram {}, model.into()))
            }
            (None, None, _) => Ok((PyUnigram {}, Unigram::default().into())),
            _ => Err(exceptions::PyValueError::new_err(
                "`vocab` and `unk_id` must both be specified",
            )),
        }
    }
}

/// Models Module
#[pymodule]
pub fn models(_py: Python, m: &PyModule) -> PyResult<()> {
    m.add_class::<PyModel>()?;
    m.add_class::<PyBPE>()?;
    m.add_class::<PyWordPiece>()?;
    m.add_class::<PyWordLevel>()?;
    m.add_class::<PyUnigram>()?;
    Ok(())
}

#[cfg(test)]
mod test {
    use crate::models::PyModel;
    use pyo3::prelude::*;
    use tk::models::bpe::BPE;
    use tk::models::ModelWrapper;

    #[test]
    fn get_subtype() {
        Python::with_gil(|py| {
            let py_model = PyModel::from(BPE::default());
            let py_bpe = py_model.get_as_subtype(py).unwrap();
            assert_eq!("BPE", py_bpe.as_ref(py).get_type().name().unwrap());
        })
    }

    #[test]
    fn serialize() {
        let rs_bpe = BPE::default();
        let rs_bpe_ser = serde_json::to_string(&rs_bpe).unwrap();
        let rs_wrapper: ModelWrapper = rs_bpe.into();
        let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap();

        let py_model = PyModel::from(rs_wrapper);
        let py_ser = serde_json::to_string(&py_model).unwrap();
        assert_eq!(py_ser, rs_bpe_ser);
        assert_eq!(py_ser, rs_wrapper_ser);

        let py_model: PyModel = serde_json::from_str(&rs_bpe_ser).unwrap();
        match *py_model.model.as_ref().read().unwrap() {
            ModelWrapper::BPE(_) => (),
            _ => panic!("Expected a BPE model."),
        };

        let py_model: PyModel = serde_json::from_str(&rs_wrapper_ser).unwrap();
        match *py_model.model.as_ref().read().unwrap() {
            ModelWrapper::BPE(_) => (),
            _ => panic!("Expected a BPE model."),
        };
    }
}
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/trainers.rs
use std::sync::{Arc, RwLock}; use crate::models::PyModel; use crate::tokenizer::PyAddedToken; use crate::utils::PyChar; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::TrainerWrapper; use tk::Trainer; use tokenizers as tk; /// Base class for all trainers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// Trainer will return an instance of this class when instantiated. #[pyclass(module = "tokenizers.trainers", name = "Trainer", subclass)] #[derive(Clone, Deserialize, Serialize)] pub struct PyTrainer { #[serde(flatten)] pub trainer: Arc<RwLock<TrainerWrapper>>, } impl PyTrainer { #[cfg(test)] pub(crate) fn new(trainer: Arc<RwLock<TrainerWrapper>>) -> Self { PyTrainer { trainer } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.trainer.as_ref().read().unwrap() { TrainerWrapper::BpeTrainer(_) => Py::new(py, (PyBpeTrainer {}, base))?.into_py(py), TrainerWrapper::WordPieceTrainer(_) => { Py::new(py, (PyWordPieceTrainer {}, base))?.into_py(py) } TrainerWrapper::WordLevelTrainer(_) => { Py::new(py, (PyWordLevelTrainer {}, base))?.into_py(py) } TrainerWrapper::UnigramTrainer(_) => { Py::new(py, (PyUnigramTrainer {}, base))?.into_py(py) } }) } } #[pymethods] impl PyTrainer { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.trainer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PyTrainer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PyTrainer: {}", e )) })?; self.trainer = unpickled; Ok(()) } Err(e) => Err(e), } } } impl Trainer for PyTrainer { type Model = PyModel; fn should_show_progress(&self) -> bool { self.trainer.read().unwrap().should_show_progress() } fn train(&self, model: &mut PyModel) -> tk::Result<Vec<tk::AddedToken>> { self.trainer .read() .unwrap() .train(&mut model.model.write().unwrap()) } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> tk::Result<Vec<String>> + Sync, { self.trainer.write().unwrap().feed(iterator, process) } } impl<I> From<I> for PyTrainer where I: Into<TrainerWrapper>, { fn from(trainer: I) -> Self { PyTrainer { trainer: Arc::new(RwLock::new(trainer.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref trainer) = *super_.trainer.read().unwrap() { trainer.$($name)+ } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() { trainer.$name = $value; } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() { trainer.$name($value); } }}; } /// Trainer capable of training a BPE model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. 
///
///     min_frequency (:obj:`int`, `optional`):
///         The minimum frequency a pair should have in order to be merged.
///
///     show_progress (:obj:`bool`, `optional`):
///         Whether to show progress bars while training.
///
///     special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
///         A list of special tokens the model should know of.
///
///     limit_alphabet (:obj:`int`, `optional`):
///         The maximum number of different characters to keep in the alphabet.
///
///     initial_alphabet (:obj:`List[str]`, `optional`):
///         A list of characters to include in the initial alphabet, even
///         if not seen in the training dataset.
///         If the strings contain more than one character, only the first one
///         is kept.
///
///     continuing_subword_prefix (:obj:`str`, `optional`):
///         A prefix to be used for every subword that is not a beginning-of-word.
///
///     end_of_word_suffix (:obj:`str`, `optional`):
///         A suffix to be used for every subword that is an end-of-word.
///
///     max_token_length (:obj:`int`, `optional`):
///         Prevents creating tokens longer than the specified size.
///         This can help prevent polluting your vocabulary with
///         highly repetitive tokens like `======` for Wikipedia.
///
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "BpeTrainer")]
pub struct PyBpeTrainer {}
#[pymethods]
impl PyBpeTrainer {
    #[getter]
    fn get_vocab_size(self_: PyRef<Self>) -> usize {
        getter!(self_, BpeTrainer, vocab_size)
    }

    #[setter]
    fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
        setter!(self_, BpeTrainer, vocab_size, vocab_size);
    }

    #[getter]
    fn get_min_frequency(self_: PyRef<Self>) -> u32 {
        getter!(self_, BpeTrainer, min_frequency)
    }

    #[setter]
    fn set_min_frequency(self_: PyRef<Self>, freq: u32) {
        setter!(self_, BpeTrainer, min_frequency, freq);
    }

    #[getter]
    fn get_show_progress(self_: PyRef<Self>) -> bool {
        getter!(self_, BpeTrainer, show_progress)
    }

    #[setter]
    fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
        setter!(self_, BpeTrainer, show_progress, show_progress);
    }

    #[getter]
    fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
        getter!(
            self_,
            BpeTrainer,
            special_tokens
                .iter()
                .map(|tok| tok.clone().into())
                .collect()
        )
    }

    #[setter]
    fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> {
        setter!(
            self_,
            BpeTrainer,
            special_tokens,
            special_tokens
                .into_iter()
                .map(|token| {
                    if let Ok(content) = token.extract::<String>() {
                        Ok(tk::tokenizer::AddedToken::from(content, true))
                    } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
                        token.special = true;
                        Ok(token.get_token())
                    } else {
                        Err(exceptions::PyTypeError::new_err(
                            "Special tokens must be a List[Union[str, AddedToken]]",
                        ))
                    }
                })
                .collect::<PyResult<Vec<_>>>()?
); Ok(()) } #[getter] fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> { getter!(self_, BpeTrainer, limit_alphabet) } #[setter] fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, BpeTrainer, limit_alphabet, limit); } #[getter] fn get_max_token_length(self_: PyRef<Self>) -> Option<usize> { getter!(self_, BpeTrainer, max_token_length) } #[setter] fn set_max_token_length(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, BpeTrainer, max_token_length, limit); } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, BpeTrainer, initial_alphabet.iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, BpeTrainer, initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BpeTrainer, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) { setter!(self_, BpeTrainer, continuing_subword_prefix, prefix); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BpeTrainer, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) { setter!(self_, BpeTrainer, end_of_word_suffix, suffix); } #[new] #[pyo3(signature = (**kwargs), text_signature = None)] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::bpe::BpeTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder = builder.vocab_size(val.extract()?), "min_frequency" => builder = builder.min_frequency(val.extract()?), "show_progress" => builder = builder.show_progress(val.extract()?), "special_tokens" => { builder = builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?), "max_token_length" => builder = builder.max_token_length(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder = builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?), _ => println!("Ignored unknown kwargs option {}", key), }; } } Ok((PyBpeTrainer {}, builder.build().into())) } } /// Trainer capable of training a WordPiece model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): /// A list of special tokens the model should know of. 
///
///     limit_alphabet (:obj:`int`, `optional`):
///         The maximum number of different characters to keep in the alphabet.
///
///     initial_alphabet (:obj:`List[str]`, `optional`):
///         A list of characters to include in the initial alphabet, even
///         if not seen in the training dataset.
///         If the strings contain more than one character, only the first one
///         is kept.
///
///     continuing_subword_prefix (:obj:`str`, `optional`):
///         A prefix to be used for every subword that is not a beginning-of-word.
///
///     end_of_word_suffix (:obj:`str`, `optional`):
///         A suffix to be used for every subword that is an end-of-word.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordPieceTrainer")]
pub struct PyWordPieceTrainer {}
#[pymethods]
impl PyWordPieceTrainer {
    #[getter]
    fn get_vocab_size(self_: PyRef<Self>) -> usize {
        getter!(self_, WordPieceTrainer, vocab_size())
    }

    #[setter]
    fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
        setter!(self_, WordPieceTrainer, @set_vocab_size, vocab_size);
    }

    #[getter]
    fn get_min_frequency(self_: PyRef<Self>) -> u32 {
        getter!(self_, WordPieceTrainer, min_frequency())
    }

    #[setter]
    fn set_min_frequency(self_: PyRef<Self>, freq: u32) {
        setter!(self_, WordPieceTrainer, @set_min_frequency, freq);
    }

    #[getter]
    fn get_show_progress(self_: PyRef<Self>) -> bool {
        getter!(self_, WordPieceTrainer, show_progress())
    }

    #[setter]
    fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
        setter!(self_, WordPieceTrainer, @set_show_progress, show_progress);
    }

    #[getter]
    fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
        getter!(
            self_,
            WordPieceTrainer,
            special_tokens()
                .iter()
                .map(|tok| tok.clone().into())
                .collect()
        )
    }

    #[setter]
    fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> {
        setter!(
            self_,
            WordPieceTrainer,
            @set_special_tokens,
            special_tokens
                .into_iter()
                .map(|token| {
                    if let Ok(content) = token.extract::<String>() {
                        Ok(tk::tokenizer::AddedToken::from(content, true))
                    } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
                        token.special = true;
                        Ok(token.get_token())
                    } else {
                        Err(exceptions::PyTypeError::new_err(
                            "Special tokens must be a List[Union[str, AddedToken]]",
                        ))
                    }
                })
                .collect::<PyResult<Vec<_>>>()?
        );
        Ok(())
    }

    #[getter]
    fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> {
        getter!(self_, WordPieceTrainer, limit_alphabet())
    }

    #[setter]
    fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) {
        setter!(self_, WordPieceTrainer, @set_limit_alphabet, limit);
    }

    #[getter]
    fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
        getter!(
            self_,
            WordPieceTrainer,
            initial_alphabet().iter().map(|c| c.to_string()).collect()
        )
    }

    #[setter]
    fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) {
        setter!(
            self_,
            WordPieceTrainer,
            @set_initial_alphabet,
            alphabet.into_iter().map(|c| c.0).collect()
        );
    }

    #[getter]
    fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, WordPieceTrainer, continuing_subword_prefix().clone())
    }

    #[setter]
    fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) {
        setter!(self_, WordPieceTrainer, @set_continuing_subword_prefix, prefix);
    }

    #[getter]
    fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, WordPieceTrainer, end_of_word_suffix().clone())
    }

    #[setter]
    fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) {
        setter!(self_, WordPieceTrainer, @set_end_of_word_suffix, suffix);
    }

    #[new]
    #[pyo3(
        signature = (**kwargs),
        text_signature = "(self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet=[], continuing_subword_prefix=\"##\", end_of_word_suffix=None)"
    )]
    pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> {
        let mut builder = tk::models::wordpiece::WordPieceTrainer::builder();
        if let Some(kwargs) = kwargs {
            for (key, val) in kwargs {
                let key: &str = key.extract()?;
                match key {
                    "vocab_size" => builder = builder.vocab_size(val.extract()?),
                    "min_frequency" => builder = builder.min_frequency(val.extract()?),
                    "show_progress" => builder = builder.show_progress(val.extract()?),
                    "special_tokens" => {
                        builder = builder.special_tokens(
                            val.downcast::<PyList>()?
                                .into_iter()
                                .map(|token| {
                                    if let Ok(content) = token.extract::<String>() {
                                        Ok(PyAddedToken::from(content, Some(true)).get_token())
                                    } else if let Ok(mut token) =
                                        token.extract::<PyRefMut<PyAddedToken>>()
                                    {
                                        token.special = true;
                                        Ok(token.get_token())
                                    } else {
                                        Err(exceptions::PyTypeError::new_err(
                                            "special_tokens must be a List[Union[str, AddedToken]]",
                                        ))
                                    }
                                })
                                .collect::<PyResult<Vec<_>>>()?,
                        );
                    }
                    "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?),
                    "initial_alphabet" => {
                        let alphabet: Vec<String> = val.extract()?;
                        builder = builder.initial_alphabet(
                            alphabet
                                .into_iter()
                                .filter_map(|s| s.chars().next())
                                .collect(),
                        );
                    }
                    "continuing_subword_prefix" => {
                        builder = builder.continuing_subword_prefix(val.extract()?)
                    }
                    "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?),
                    _ => println!("Ignored unknown kwargs option {}", key),
                };
            }
        }
        Ok((PyWordPieceTrainer {}, builder.build().into()))
    }
}

/// Trainer capable of training a WordLevel model
///
/// Args:
///     vocab_size (:obj:`int`, `optional`):
///         The size of the final vocabulary, including all tokens and alphabet.
///
///     min_frequency (:obj:`int`, `optional`):
///         The minimum frequency a pair should have in order to be merged.
///
///     show_progress (:obj:`bool`, `optional`):
///         Whether to show progress bars while training.
///
///     special_tokens (:obj:`List[Union[str, AddedToken]]`):
///         A list of special tokens the model should know of.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordLevelTrainer")] pub struct PyWordLevelTrainer {} #[pymethods] impl PyWordLevelTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, WordLevelTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, WordLevelTrainer, vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, WordLevelTrainer, min_frequency) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, WordLevelTrainer, min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, WordLevelTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, WordLevelTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, WordLevelTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, WordLevelTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? ); Ok(()) } #[new] #[pyo3(signature = (**kwargs), text_signature = None)] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::wordlevel::WordLevelTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => { builder.vocab_size(val.extract()?); } "min_frequency" => { builder.min_frequency(val.extract()?); } "show_progress" => { builder.show_progress(val.extract()?); } "special_tokens" => { builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } _ => println!("Ignored unknown kwargs option {}", key), } } } Ok(( PyWordLevelTrainer {}, builder .build() .expect("WordLevelTrainerBuilder cannot fail") .into(), )) } } /// Trainer capable of training a Unigram model /// /// Args: /// vocab_size (:obj:`int`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// show_progress (:obj:`bool`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`): /// A list of special tokens the model should know of. /// /// initial_alphabet (:obj:`List[str]`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// shrinking_factor (:obj:`float`): /// The shrinking factor used at each step of the training to prune the /// vocabulary. 
/// /// unk_token (:obj:`str`): /// The token used for out-of-vocabulary tokens. /// /// max_piece_length (:obj:`int`): /// The maximum length of a given token. /// /// n_sub_iterations (:obj:`int`): /// The number of iterations of the EM algorithm to perform before /// pruning the vocabulary. #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "UnigramTrainer")] pub struct PyUnigramTrainer {} #[pymethods] impl PyUnigramTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> u32 { getter!(self_, UnigramTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: u32) { setter!(self_, UnigramTrainer, vocab_size, vocab_size); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, UnigramTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, UnigramTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, UnigramTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, UnigramTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? ); Ok(()) } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, UnigramTrainer, initial_alphabet.iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, UnigramTrainer, initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[new] #[pyo3( signature = (**kwargs), text_signature = "(self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2)" )] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::unigram::UnigramTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder.vocab_size(val.extract()?), "show_progress" => builder.show_progress(val.extract()?), "n_sub_iterations" => builder.n_sub_iterations(val.extract()?), "shrinking_factor" => builder.shrinking_factor(val.extract()?), "unk_token" => builder.unk_token(val.extract()?), "max_piece_length" => builder.max_piece_length(val.extract()?), "seed_size" => builder.seed_size(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ) } "special_tokens" => builder.special_tokens( val.downcast::<PyList>()? 
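                            // As with the other trainers, both plain strings and
                            // AddedToken instances are accepted; either way the
                            // token is registered as a special token.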
.into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ), _ => { println!("Ignored unknown kwargs option {}", key); &mut builder } }; } } let trainer: tokenizers::models::unigram::UnigramTrainer = builder.build().map_err(|e| { exceptions::PyException::new_err(format!("Cannot build UnigramTrainer: {}", e)) })?; Ok((PyUnigramTrainer {}, trainer.into())) } } /// Trainers Module #[pymodule] pub fn trainers(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyTrainer>()?; m.add_class::<PyBpeTrainer>()?; m.add_class::<PyWordPieceTrainer>()?; m.add_class::<PyWordLevelTrainer>()?; m.add_class::<PyUnigramTrainer>()?; Ok(()) } #[cfg(test)] mod tests { use super::*; use tk::models::bpe::trainer::BpeTrainer; #[test] fn get_subtype() { Python::with_gil(|py| { let py_trainer = PyTrainer::new(Arc::new(RwLock::new(BpeTrainer::default().into()))); let py_bpe = py_trainer.get_as_subtype(py).unwrap(); assert_eq!("BpeTrainer", py_bpe.as_ref(py).get_type().name().unwrap()); }) } }
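// A minimal illustrative sketch (not exercised upstream): any concrete trainer
// can be wrapped into a `PyTrainer` through the blanket
// `From<I: Into<TrainerWrapper>>` impl defined above.
#[cfg(test)]
mod from_impl_sketch {
    use super::*;
    use tk::models::bpe::trainer::BpeTrainer;

    #[test]
    fn wraps_concrete_trainers() {
        // Goes through `TrainerWrapper::BpeTrainer` under the hood.
        let py_trainer: PyTrainer = BpeTrainer::default().into();
        // `Trainer` trait calls are simply forwarded to the wrapped value.
        let _ = py_trainer.should_show_progress();
    }
}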
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/processors.rs
use std::convert::TryInto; use std::sync::Arc; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use serde::{Deserialize, Serialize}; use tk::processors::bert::BertProcessing; use tk::processors::byte_level::ByteLevel; use tk::processors::roberta::RobertaProcessing; use tk::processors::sequence::Sequence; use tk::processors::template::{SpecialToken, Template}; use tk::processors::PostProcessorWrapper; use tk::{Encoding, PostProcessor}; use tokenizers as tk; /// Base class for all post-processors /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a PostProcessor will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.processors", name = "PostProcessor", subclass )] #[derive(Clone, Deserialize, Serialize)] pub struct PyPostProcessor { #[serde(flatten)] pub processor: Arc<PostProcessorWrapper>, } impl PyPostProcessor { pub fn new(processor: Arc<PostProcessorWrapper>) -> Self { PyPostProcessor { processor } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.processor.as_ref() { PostProcessorWrapper::ByteLevel(_) => Py::new(py, (PyByteLevel {}, base))?.into_py(py), PostProcessorWrapper::Bert(_) => Py::new(py, (PyBertProcessing {}, base))?.into_py(py), PostProcessorWrapper::Roberta(_) => { Py::new(py, (PyRobertaProcessing {}, base))?.into_py(py) } PostProcessorWrapper::Template(_) => { Py::new(py, (PyTemplateProcessing {}, base))?.into_py(py) } PostProcessorWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py), }) } } impl PostProcessor for PyPostProcessor { fn added_tokens(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> tk::Result<Vec<Encoding>> { self.processor .process_encodings(encodings, add_special_tokens) } } #[pymethods] impl PyPostProcessor { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(self.processor.as_ref()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PostProcessor: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.processor = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PostProcessor: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Return the number of special tokens that would be added for single/pair sentences. 
/// /// Args: /// is_pair (:obj:`bool`): /// Whether the input would be a pair of sequences /// /// Returns: /// :obj:`int`: The number of tokens to add #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } /// Post-process the given encodings, generating the final one /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The encoding for the first sequence /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// The encoding for the pair sequence /// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Return: /// :class:`~tokenizers.Encoding`: The final encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let final_encoding = ToPyResult(self.processor.process( encoding.encoding.clone(), pair.map(|e| e.encoding.clone()), add_special_tokens, )) .into_py()?; Ok(final_encoding.into()) } } /// This post-processor takes care of adding the special tokens needed by /// a Bert model: /// /// - a SEP token /// - a CLS token /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "BertProcessing")] pub struct PyBertProcessing {} #[pymethods] impl PyBertProcessing { #[new] #[pyo3(text_signature = "(self, sep, cls)")] fn new(sep: (String, u32), cls: (String, u32)) -> (Self, PyPostProcessor) { ( PyBertProcessing {}, PyPostProcessor::new(Arc::new(BertProcessing::new(sep, cls).into())), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [("", 0), ("", 0)]) } } /// This post-processor takes care of adding the special tokens needed by /// a Roberta model: /// /// - a SEP token /// - a CLS token /// /// It also takes care of trimming the offsets. /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor should be initialized /// with :obj:`trim_offsets=True` /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id /// /// trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to trim the whitespaces from the produced offsets. /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether the add_prefix_space option was enabled during pre-tokenization. This /// is relevant because it defines the way the offsets are trimmed out. 
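///
/// Example (an illustrative sketch; the exact token/id pairs depend on your vocabulary)::
///
///     processor = RobertaProcessing(sep=("</s>", 2), cls=("<s>", 0))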
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "RobertaProcessing")] pub struct PyRobertaProcessing {} #[pymethods] impl PyRobertaProcessing { #[new] #[pyo3(signature = (sep, cls, trim_offsets = true, add_prefix_space = true), text_signature = "(self, sep, cls, trim_offsets=True, add_prefix_space=True)")] fn new( sep: (String, u32), cls: (String, u32), trim_offsets: bool, add_prefix_space: bool, ) -> (Self, PyPostProcessor) { let proc = RobertaProcessing::new(sep, cls) .trim_offsets(trim_offsets) .add_prefix_space(add_prefix_space); ( PyRobertaProcessing {}, PyPostProcessor::new(Arc::new(proc.into())), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [("", 0), ("", 0)]) } } /// This post-processor takes care of trimming the offsets. /// /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor must be used. /// /// Args: /// trim_offsets (:obj:`bool`): /// Whether to trim the whitespaces from the produced offsets. #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[new] #[pyo3(signature = (trim_offsets = None, **_kwargs), text_signature = "(self, trim_offsets=True)")] fn new(trim_offsets: Option<bool>, _kwargs: Option<&PyDict>) -> (Self, PyPostProcessor) { let mut byte_level = ByteLevel::default(); if let Some(to) = trim_offsets { byte_level = byte_level.trim_offsets(to); } ( PyByteLevel {}, PyPostProcessor::new(Arc::new(byte_level.into())), ) } } #[derive(Clone, Debug)] pub struct PySpecialToken(SpecialToken); impl From<PySpecialToken> for SpecialToken { fn from(v: PySpecialToken) -> Self { v.0 } } impl FromPyObject<'_> for PySpecialToken { fn extract(ob: &PyAny) -> PyResult<Self> { if let Ok(v) = ob.extract::<(String, u32)>() { Ok(Self(v.into())) } else if let Ok(v) = ob.extract::<(u32, String)>() { Ok(Self(v.into())) } else if let Ok(d) = ob.downcast::<PyDict>() { let id = d .get_item("id")? .ok_or_else(|| exceptions::PyValueError::new_err("`id` must be specified"))? .extract::<String>()?; let ids = d .get_item("ids")? .ok_or_else(|| exceptions::PyValueError::new_err("`ids` must be specified"))? .extract::<Vec<u32>>()?; let tokens = d .get_item("tokens")? .ok_or_else(|| exceptions::PyValueError::new_err("`tokens` must be specified"))? .extract::<Vec<String>>()?; Ok(Self( ToPyResult(SpecialToken::new(id, ids, tokens)).into_py()?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[Tuple[str, int], Tuple[int, str], dict]", )) } } } #[derive(Clone, Debug)] pub struct PyTemplate(Template); impl From<PyTemplate> for Template { fn from(v: PyTemplate) -> Self { v.0 } } impl FromPyObject<'_> for PyTemplate { fn extract(ob: &PyAny) -> PyResult<Self> { if let Ok(s) = ob.extract::<&str>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else if let Ok(s) = ob.extract::<Vec<&str>>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[str, List[str]]", )) } } } /// Provides a way to specify templates in order to add the special tokens to each /// input sequence as relevant. /// /// Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to /// delimitate each sequence. 
:obj:`[CLS]` is always used at the beginning of the first
/// sequence, and :obj:`[SEP]` is added at the end of both the first and the pair
/// sequences. The final result looks like this:
///
///     - Single sequence: :obj:`[CLS] Hello there [SEP]`
///     - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
///
/// With the type ids as follows::
///
///     [CLS]   ...   [SEP]   ...   [SEP]
///       0      0      0      1      1
///
/// You can achieve such behavior using a TemplateProcessing::
///
///     TemplateProcessing(
///         single="[CLS] $0 [SEP]",
///         pair="[CLS] $A [SEP] $B:1 [SEP]:1",
///         special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
///     )
///
/// In this example, each input sequence is identified using a ``$`` construct. This identifier
/// lets us specify each input sequence, and the type_id to use. When nothing is specified,
/// it uses the default values. Here are the different ways to specify it:
///
/// - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
/// - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
/// - Specifying both: ``$A:0``, ``$B:1``, ...
///
/// The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
///
/// **Warning**: You must ensure that you are giving the correct tokens/ids as these
/// will be added to the Encoding without any further check. If the given ids correspond
/// to something totally different in a `Tokenizer` using this `PostProcessor`, it
/// might lead to unexpected results.
///
/// Args:
///     single (:obj:`Template`):
///         The template used for single sequences
///
///     pair (:obj:`Template`):
///         The template used when both sequences are specified
///
///     special_tokens (:obj:`Tokens`):
///         The list of special tokens used in each sequence
///
/// Types:
///
///     Template (:obj:`str` or :obj:`List`):
///         - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
///         - If a :obj:`List[str]` is provided, a list of tokens
///
///     Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
///         - A :obj:`Tuple` with both a token and its associated ID, in any order
///         - A :obj:`dict` with the following keys:
///             - "id": :obj:`str` => The special token id, as specified in the Template
///             - "ids": :obj:`List[int]` => The associated IDs
///             - "tokens": :obj:`List[str]` => The associated tokens
///
///         The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
///         the same length.
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "TemplateProcessing")] pub struct PyTemplateProcessing {} #[pymethods] impl PyTemplateProcessing { #[new] #[pyo3(signature = (single = None, pair = None, special_tokens = None), text_signature = "(self, single, pair, special_tokens)")] fn new( single: Option<PyTemplate>, pair: Option<PyTemplate>, special_tokens: Option<Vec<PySpecialToken>>, ) -> PyResult<(Self, PyPostProcessor)> { let mut builder = tk::processors::template::TemplateProcessing::builder(); if let Some(seq) = single { builder.single(seq.into()); } if let Some(seq) = pair { builder.pair(seq.into()); } if let Some(sp) = special_tokens { builder.special_tokens(sp); } let processor = builder .build() .map_err(|e| exceptions::PyValueError::new_err(e.to_string()))?; Ok(( PyTemplateProcessing {}, PyPostProcessor::new(Arc::new(processor.into())), )) } } /// Sequence Processor /// /// Args: /// processors (:obj:`List[PostProcessor]`) /// The processors that need to be chained #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(signature = (processors_py), text_signature = "(self, processors)")] fn new(processors_py: &PyList) -> (Self, PyPostProcessor) { let mut processors: Vec<PostProcessorWrapper> = Vec::with_capacity(processors_py.len()); for n in processors_py.iter() { let processor: PyRef<PyPostProcessor> = n.extract().unwrap(); let processor = processor.processor.as_ref(); processors.push(processor.clone()); } let sequence_processor = Sequence::new(processors); ( PySequence {}, PyPostProcessor::new(Arc::new(PostProcessorWrapper::Sequence(sequence_processor))), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } /// Processors Module #[pymodule] pub fn processors(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyPostProcessor>()?; m.add_class::<PyBertProcessing>()?; m.add_class::<PyRobertaProcessing>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyTemplateProcessing>()?; m.add_class::<PySequence>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::Arc; use pyo3::prelude::*; use tk::processors::bert::BertProcessing; use tk::processors::PostProcessorWrapper; use crate::processors::PyPostProcessor; #[test] fn get_subtype() { Python::with_gil(|py| { let py_proc = PyPostProcessor::new(Arc::new( BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)).into(), )); let py_bert = py_proc.get_as_subtype(py).unwrap(); assert_eq!( "BertProcessing", py_bert.as_ref(py).get_type().name().unwrap() ); }) } #[test] fn serialize() { let rs_processing = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)); let rs_wrapper: PostProcessorWrapper = rs_processing.clone().into(); let rs_processing_ser = serde_json::to_string(&rs_processing).unwrap(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_processing = PyPostProcessor::new(Arc::new(rs_wrapper)); let py_ser = serde_json::to_string(&py_processing).unwrap(); assert_eq!(py_ser, rs_processing_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_processing: PyPostProcessor = serde_json::from_str(&rs_processing_ser).unwrap(); match py_processing.processor.as_ref() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected Bert postprocessor."), } let py_processing: PyPostProcessor = serde_json::from_str(&rs_wrapper_ser).unwrap(); match py_processing.processor.as_ref() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected 
Bert postprocessor."), } } }
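// Illustrative sketch mirroring `get_subtype` above: the subtype lookup
// resolves every wrapper variant, e.g. the ByteLevel post-processor.
#[cfg(test)]
mod byte_level_subtype_sketch {
    use std::sync::Arc;

    use pyo3::prelude::*;
    use tk::processors::byte_level::ByteLevel;

    use crate::processors::PyPostProcessor;

    #[test]
    fn get_byte_level_subtype() {
        Python::with_gil(|py| {
            let py_proc = PyPostProcessor::new(Arc::new(ByteLevel::default().into()));
            let py_bl = py_proc.get_as_subtype(py).unwrap();
            assert_eq!("ByteLevel", py_bl.as_ref(py).get_type().name().unwrap());
        })
    }
}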
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/decoders.rs
use std::sync::{Arc, RwLock}; use crate::utils::PyChar; use crate::utils::PyPattern; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::decoders::bpe::BPEDecoder; use tk::decoders::byte_fallback::ByteFallback; use tk::decoders::byte_level::ByteLevel; use tk::decoders::ctc::CTC; use tk::decoders::fuse::Fuse; use tk::decoders::metaspace::Metaspace; use tk::decoders::sequence::Sequence; use tk::decoders::strip::Strip; use tk::decoders::wordpiece::WordPiece; use tk::decoders::DecoderWrapper; use tk::normalizers::replace::Replace; use tk::Decoder; use tokenizers as tk; use super::error::ToPyResult; /// Base class for all decoders /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a Decoder will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.decoders", name = "Decoder", subclass)] #[derive(Clone, Deserialize, Serialize)] pub struct PyDecoder { #[serde(flatten)] pub(crate) decoder: PyDecoderWrapper, } impl PyDecoder { pub(crate) fn new(decoder: PyDecoderWrapper) -> Self { PyDecoder { decoder } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.decoder { PyDecoderWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyDecoderWrapper::Wrapped(inner) => match &*inner.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => Py::new(py, (PyMetaspaceDec {}, base))?.into_py(py), DecoderWrapper::WordPiece(_) => Py::new(py, (PyWordPieceDec {}, base))?.into_py(py), DecoderWrapper::ByteFallback(_) => { Py::new(py, (PyByteFallbackDec {}, base))?.into_py(py) } DecoderWrapper::Strip(_) => Py::new(py, (PyStrip {}, base))?.into_py(py), DecoderWrapper::Fuse(_) => Py::new(py, (PyFuseDec {}, base))?.into_py(py), DecoderWrapper::ByteLevel(_) => Py::new(py, (PyByteLevelDec {}, base))?.into_py(py), DecoderWrapper::Replace(_) => Py::new(py, (PyReplaceDec {}, base))?.into_py(py), DecoderWrapper::BPE(_) => Py::new(py, (PyBPEDecoder {}, base))?.into_py(py), DecoderWrapper::CTC(_) => Py::new(py, (PyCTCDecoder {}, base))?.into_py(py), DecoderWrapper::Sequence(_) => { Py::new(py, (PySequenceDecoder {}, base))?.into_py(py) } }, }) } } impl Decoder for PyDecoder { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { self.decoder.decode_chain(tokens) } } #[pymethods] impl PyDecoder { #[staticmethod] fn custom(decoder: PyObject) -> Self { let decoder = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(decoder)))); PyDecoder::new(decoder) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.decoder).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Decoder: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.decoder = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Decoder: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Decode the given list of tokens to a final string /// /// Args: /// tokens (:obj:`List[str]`): /// The list of tokens to decode /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(text_signature = "(self, tokens)")] fn decode(&self, tokens: Vec<String>) -> PyResult<String> { 
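        // Forward to the wrapped decoder; `ToPyResult` converts the `tk::Result<String>`
        // into a `PyResult<String>` so Rust errors surface as Python exceptions.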
ToPyResult(self.decoder.decode(tokens)).into() } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref dec) = *wrap.read().unwrap() { dec.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name($value); } } }}; } /// ByteLevel Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteLevel")] pub struct PyByteLevelDec {} #[pymethods] impl PyByteLevelDec { #[new] #[pyo3(signature = (**_kwargs), text_signature = "(self)")] fn new(_kwargs: Option<&PyDict>) -> (Self, PyDecoder) { (PyByteLevelDec {}, ByteLevel::default().into()) } } /// Replace Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Replace")] pub struct PyReplaceDec {} #[pymethods] impl PyReplaceDec { #[new] #[pyo3(text_signature = "(self, pattern, content)")] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyDecoder)> { Ok(( PyReplaceDec {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } /// WordPiece Decoder /// /// Args: /// prefix (:obj:`str`, `optional`, defaults to :obj:`##`): /// The prefix to use for subwords that are not a beginning-of-word /// /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, /// and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "WordPiece")] pub struct PyWordPieceDec {} #[pymethods] impl PyWordPieceDec { #[getter] fn get_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, prefix.clone()) } #[setter] fn set_prefix(self_: PyRef<Self>, prefix: String) { setter!(self_, WordPiece, prefix, prefix); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, WordPiece, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, WordPiece, cleanup, cleanup); } #[new] #[pyo3(signature = (prefix = String::from("##"), cleanup = true), text_signature = "(self, prefix=\"##\", cleanup=True)")] fn new(prefix: String, cleanup: bool) -> (Self, PyDecoder) { (PyWordPieceDec {}, WordPiece::new(prefix, cleanup).into()) } } /// ByteFallback Decoder /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. 
If the tokens
/// cannot be decoded you will get � instead for each inconvertible byte token
///
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteFallback")]
pub struct PyByteFallbackDec {}
#[pymethods]
impl PyByteFallbackDec {
    #[new]
    #[pyo3(signature = (), text_signature = "(self)")]
    fn new() -> (Self, PyDecoder) {
        (PyByteFallbackDec {}, ByteFallback::new().into())
    }
}

/// Fuse Decoder
/// Fuse simply fuses every token into a single string.
/// This is the last step of decoding; this decoder exists only if
/// there is a need to add other decoders *after* the fusion.
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Fuse")]
pub struct PyFuseDec {}
#[pymethods]
impl PyFuseDec {
    #[new]
    #[pyo3(signature = (), text_signature = "(self)")]
    fn new() -> (Self, PyDecoder) {
        (PyFuseDec {}, Fuse::new().into())
    }
}

/// Strip Decoder
/// Strips n left characters of each token, or n right characters of each token
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Strip")]
pub struct PyStrip {}
#[pymethods]
impl PyStrip {
    #[getter]
    fn get_start(self_: PyRef<Self>) -> usize {
        getter!(self_, Strip, start)
    }

    #[setter]
    fn set_start(self_: PyRef<Self>, start: usize) {
        setter!(self_, Strip, start, start)
    }

    #[getter]
    fn get_stop(self_: PyRef<Self>) -> usize {
        getter!(self_, Strip, stop)
    }

    #[setter]
    fn set_stop(self_: PyRef<Self>, stop: usize) {
        setter!(self_, Strip, stop, stop)
    }

    #[getter]
    fn get_content(self_: PyRef<Self>) -> char {
        getter!(self_, Strip, content)
    }

    #[setter]
    fn set_content(self_: PyRef<Self>, content: char) {
        setter!(self_, Strip, content, content)
    }

    #[new]
    #[pyo3(signature = (content=' ', left=0, right=0), text_signature = "(self, content, left=0, right=0)")]
    fn new(content: char, left: usize, right: usize) -> (Self, PyDecoder) {
        (PyStrip {}, Strip::new(content, left, right).into())
    }
}

/// Metaspace Decoder
///
/// Args:
///     replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
///         The replacement character. Must be exactly one character. By default we
///         use the `▁` (U+2581) meta symbol (same as in SentencePiece).
///
///     add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
///         Whether to add a space to the first word if there isn't already one. This
///         lets us treat `hello` exactly like `say hello`.
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Metaspace")]
pub struct PyMetaspaceDec {}
#[pymethods]
impl PyMetaspaceDec {
    #[getter]
    fn get_replacement(self_: PyRef<Self>) -> String {
        getter!(self_, Metaspace, get_replacement().to_string())
    }

    #[setter]
    fn set_replacement(self_: PyRef<Self>, replacement: PyChar) {
        setter!(self_, Metaspace, @set_replacement, replacement.0);
    }

    #[getter]
    fn get_add_prefix_space(self_: PyRef<Self>) -> bool {
        getter!(self_, Metaspace, add_prefix_space)
    }

    #[setter]
    fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) {
        setter!(self_, Metaspace, add_prefix_space, add_prefix_space);
    }

    #[new]
    #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true), text_signature = "(self, replacement = \"▁\", add_prefix_space = True)")]
    fn new(replacement: PyChar, add_prefix_space: bool) -> (Self, PyDecoder) {
        (
            PyMetaspaceDec {},
            Metaspace::new(replacement.0, add_prefix_space).into(),
        )
    }
}

/// BPEDecoder Decoder
///
/// Args:
///     suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
///         The suffix that was used to characterize an end-of-word.
This suffix will /// be replaced by whitespaces during the decoding #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "BPEDecoder")] pub struct PyBPEDecoder {} #[pymethods] impl PyBPEDecoder { #[getter] fn get_suffix(self_: PyRef<Self>) -> String { getter!(self_, BPE, suffix.clone()) } #[setter] fn set_suffix(self_: PyRef<Self>, suffix: String) { setter!(self_, BPE, suffix, suffix); } #[new] #[pyo3(signature = (suffix = String::from("</w>")), text_signature = "(self, suffix=\"</w>\")")] fn new(suffix: String) -> (Self, PyDecoder) { (PyBPEDecoder {}, BPEDecoder::new(suffix).into()) } } /// CTC Decoder /// /// Args: /// pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`): /// The pad token used by CTC to delimit a new token. /// word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): /// The word delimiter token. It will be replaced by a <space> /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. /// Mainly spaces before punctuation, and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "CTC")] pub struct PyCTCDecoder {} #[pymethods] impl PyCTCDecoder { #[getter] fn get_pad_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, pad_token.clone()) } #[setter] fn set_pad_token(self_: PyRef<Self>, pad_token: String) { setter!(self_, CTC, pad_token, pad_token); } #[getter] fn get_word_delimiter_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, word_delimiter_token.clone()) } #[setter] fn set_word_delimiter_token(self_: PyRef<Self>, word_delimiter_token: String) { setter!(self_, CTC, word_delimiter_token, word_delimiter_token); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, CTC, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, CTC, cleanup, cleanup); } #[new] #[pyo3(signature = ( pad_token = String::from("<pad>"), word_delimiter_token = String::from("|"), cleanup = true ), text_signature = "(self, pad_token=\"<pad>\", word_delimiter_token=\"|\", cleanup=True)")] fn new(pad_token: String, word_delimiter_token: String, cleanup: bool) -> (Self, PyDecoder) { ( PyCTCDecoder {}, CTC::new(pad_token, word_delimiter_token, cleanup).into(), ) } } /// Sequence Decoder /// /// Args: /// decoders (:obj:`List[Decoder]`) /// The decoders that need to be chained #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name="Sequence")] pub struct PySequenceDecoder {} #[pymethods] impl PySequenceDecoder { #[new] #[pyo3(signature = (decoders_py), text_signature = "(self, decoders)")] fn new(decoders_py: &PyList) -> PyResult<(Self, PyDecoder)> { let mut decoders: Vec<DecoderWrapper> = Vec::with_capacity(decoders_py.len()); for decoder_py in decoders_py.iter() { let decoder: PyRef<PyDecoder> = decoder_py.extract()?; let decoder = match &decoder.decoder { PyDecoderWrapper::Wrapped(inner) => inner, PyDecoderWrapper::Custom(_) => unimplemented!(), }; decoders.push(decoder.read().unwrap().clone()); } Ok((PySequenceDecoder {}, Sequence::new(decoders).into())) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } #[derive(Clone)] pub(crate) struct CustomDecoder { inner: PyObject, } impl CustomDecoder { pub(crate) fn new(inner: PyObject) -> Self { CustomDecoder { inner } } } impl Decoder for CustomDecoder { fn decode(&self, tokens: Vec<String>) -> tk::Result<String> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode", 
(tokens,), None)? .extract(py)?; Ok(decoded) }) } fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode_chain", (tokens,), None)? .extract(py)?; Ok(decoded) }) } } impl Serialize for CustomDecoder { fn serialize<S>(&self, _serializer: S) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PyDecoder cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomDecoder { fn deserialize<D>(_deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { Err(D::Error::custom("PyDecoder cannot be deserialized")) } } #[derive(Clone, Deserialize, Serialize)] #[serde(untagged)] pub(crate) enum PyDecoderWrapper { Custom(Arc<RwLock<CustomDecoder>>), Wrapped(Arc<RwLock<DecoderWrapper>>), } impl<I> From<I> for PyDecoderWrapper where I: Into<DecoderWrapper>, { fn from(norm: I) -> Self { PyDecoderWrapper::Wrapped(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyDecoder where I: Into<DecoderWrapper>, { fn from(dec: I) -> Self { PyDecoder { decoder: dec.into().into(), } } } impl Decoder for PyDecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { match self { PyDecoderWrapper::Wrapped(inner) => inner.read().unwrap().decode_chain(tokens), PyDecoderWrapper::Custom(inner) => inner.read().unwrap().decode_chain(tokens), } } } /// Decoders Module #[pymodule] pub fn decoders(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyDecoder>()?; m.add_class::<PyByteLevelDec>()?; m.add_class::<PyReplaceDec>()?; m.add_class::<PyWordPieceDec>()?; m.add_class::<PyByteFallbackDec>()?; m.add_class::<PyFuseDec>()?; m.add_class::<PyStrip>()?; m.add_class::<PyMetaspaceDec>()?; m.add_class::<PyBPEDecoder>()?; m.add_class::<PyCTCDecoder>()?; m.add_class::<PySequenceDecoder>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::{Arc, RwLock}; use pyo3::prelude::*; use tk::decoders::metaspace::Metaspace; use tk::decoders::DecoderWrapper; use crate::decoders::{CustomDecoder, PyDecoder, PyDecoderWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_dec = PyDecoder::new(Metaspace::default().into()); let py_meta = py_dec.get_as_subtype(py).unwrap(); assert_eq!("Metaspace", py_meta.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyDecoderWrapper = Metaspace::default().into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = DecoderWrapper::Metaspace(Metaspace::default()); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_dec: PyDecoder = serde_json::from_str(&rs_ser).unwrap(); match py_dec.decoder { PyDecoderWrapper::Wrapped(msp) => match *msp.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => {} _ => panic!("Expected Metaspace"), }, _ => panic!("Expected wrapped, not custom."), } let obj = Python::with_gil(|py| { let py_msp = PyDecoder::new(Metaspace::default().into()); let obj: PyObject = Py::new(py, py_msp).unwrap().into_py(py); obj }); let py_seq = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(obj)))); assert!(serde_json::to_string(&py_seq).is_err()); } }
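// Illustrative sketch (assumes the standard `##` continuation handling of the
// WordPiece decoder): `PyDecoder` implements `decode_chain`, and its `decode`
// pymethod concatenates the chain's output through the wrapped decoder.
#[cfg(test)]
mod wordpiece_decode_sketch {
    use tk::decoders::wordpiece::WordPiece;

    use crate::decoders::PyDecoder;

    #[test]
    fn strips_continuation_prefix() {
        let dec = PyDecoder::new(WordPiece::new("##".to_string(), true).into());
        // "##world" is a continuation of "hello", so the pieces fuse into one word.
        let out = dec.decode(vec!["hello".into(), "##world".into()]).unwrap();
        assert_eq!("helloworld", out);
    }
}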
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/encoding.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::tokenizer::{Offsets, PaddingDirection}; use tk::utils::truncation::TruncationDirection; use tokenizers as tk; use crate::error::{deprecation_warning, PyError}; /// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. #[pyclass(dict, module = "tokenizers", name = "Encoding")] #[repr(transparent)] pub struct PyEncoding { pub encoding: tk::tokenizer::Encoding, } impl From<tk::tokenizer::Encoding> for PyEncoding { fn from(v: tk::tokenizer::Encoding) -> Self { Self { encoding: v } } } #[pymethods] impl PyEncoding { #[new] #[pyo3(text_signature = None)] fn new() -> Self { Self { encoding: tk::tokenizer::Encoding::default(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.encoding).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Encoding: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.encoding = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Encoding: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __repr__(&self) -> PyResult<String> { Ok(format!( "Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \ attention_mask, special_tokens_mask, overflowing])", self.encoding.get_ids().len() )) } fn __len__(&self) -> PyResult<usize> { Ok(self.encoding.len()) } /// Merge the list of encodings into one final :class:`~tokenizers.Encoding` /// /// Args: /// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): /// The list of encodings that should be merged in one /// /// growing_offsets (:obj:`bool`, defaults to :obj:`True`): /// Whether the offsets should accumulate while merging /// /// Returns: /// :class:`~tokenizers.Encoding`: The resulting Encoding #[staticmethod] #[pyo3(signature = (encodings, growing_offsets = true))] #[pyo3(text_signature = "(encodings, growing_offsets=True)")] fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding { tk::tokenizer::Encoding::merge( encodings.into_iter().map(|e| e.encoding.clone()), growing_offsets, ) .into() } /// The number of sequences represented /// /// Returns: /// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` #[getter] fn get_n_sequences(&self) -> usize { self.encoding.n_sequences() } /// Set the given sequence index /// /// Set the given sequence index for the whole range of tokens contained in this /// :class:`~tokenizers.Encoding`. #[pyo3(text_signature = "(self, sequence_id)")] fn set_sequence_id(&mut self, sequence_id: usize) { self.encoding.set_sequence_id(sequence_id); } /// The generated IDs /// /// The IDs are the main input to a Language Model. They are the token indices, /// the numerical representations that a LM understands. /// /// Returns: /// :obj:`List[int]`: The list of IDs #[getter] fn get_ids(&self) -> Vec<u32> { self.encoding.get_ids().to_vec() } /// The generated tokens /// /// They are the string representation of the IDs. /// /// Returns: /// :obj:`List[str]`: The list of tokens #[getter] fn get_tokens(&self) -> Vec<String> { self.encoding.get_tokens().to_vec() } /// The generated word indices. /// /// .. warning:: /// This is deprecated and will be removed in a future version. 
    ///     Please use :obj:`~tokenizers.Encoding.word_ids` instead.
    ///
    /// They represent the index of the word associated with each token.
    /// When the input is pre-tokenized, they correspond to the ID of the given input label,
    /// otherwise they correspond to the word indices as defined by the
    /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
    ///
    /// For special tokens and such (any token that was generated from something that was
    /// not part of the input), the output is :obj:`None`
    ///
    /// Returns:
    ///     A :obj:`List` of :obj:`Optional[int]`: A list of optional word indices.
    #[getter]
    fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> {
        deprecation_warning(
            py,
            "0.9.4",
            "Encoding.words is deprecated, please use Encoding.word_ids instead.",
        )?;
        Ok(self.get_word_ids())
    }

    /// The generated word indices.
    ///
    /// They represent the index of the word associated with each token.
    /// When the input is pre-tokenized, they correspond to the ID of the given input label,
    /// otherwise they correspond to the word indices as defined by the
    /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
    ///
    /// For special tokens and such (any token that was generated from something that was
    /// not part of the input), the output is :obj:`None`
    ///
    /// Returns:
    ///     A :obj:`List` of :obj:`Optional[int]`: A list of optional word indices.
    #[getter]
    fn get_word_ids(&self) -> Vec<Option<u32>> {
        self.encoding.get_word_ids().to_vec()
    }

    /// The generated sequence indices.
    ///
    /// They represent the index of the input sequence associated with each token.
    /// The sequence id can be None if the token is not related to any input sequence,
    /// like for example with special tokens.
    ///
    /// Returns:
    ///     A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence indices.
    #[getter]
    fn get_sequence_ids(&self) -> Vec<Option<usize>> {
        self.encoding.get_sequence_ids()
    }

    /// The generated type IDs
    ///
    /// Generally used for tasks like sequence classification or question answering,
    /// these tokens let the LM know which input sequence corresponds to each token.
    ///
    /// Returns:
    ///     :obj:`List[int]`: The list of type ids
    #[getter]
    fn get_type_ids(&self) -> Vec<u32> {
        self.encoding.get_type_ids().to_vec()
    }

    /// The offsets associated with each token
    ///
    /// These offsets let you slice the input string, and thus retrieve the original
    /// part that led to producing the corresponding token.
    ///
    /// Returns:
    ///     A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
    #[getter]
    fn get_offsets(&self) -> Vec<(usize, usize)> {
        self.encoding.get_offsets().to_vec()
    }

    /// The special token mask
    ///
    /// This indicates which tokens are special tokens, and which are not.
    ///
    /// Returns:
    ///     :obj:`List[int]`: The special tokens mask
    #[getter]
    fn get_special_tokens_mask(&self) -> Vec<u32> {
        self.encoding.get_special_tokens_mask().to_vec()
    }

    /// The attention mask
    ///
    /// This indicates to the LM which tokens should be attended to, and which should not.
    /// This is especially important when batching sequences, where we need to apply
    /// padding.
    ///
    /// Returns:
    ///     :obj:`List[int]`: The attention mask
    #[getter]
    fn get_attention_mask(&self) -> Vec<u32> {
        self.encoding.get_attention_mask().to_vec()
    }

    /// A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
    ///
    /// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
    /// the output into as many pieces as required to match the specified maximum length.
    /// This field lets you retrieve all the subsequent pieces.
    ///
    /// When you use pairs of sequences, the overflowing pieces will contain enough
    /// variations to cover all the possible combinations, while respecting the provided
    /// maximum length.
    #[getter]
    fn get_overflowing(&self) -> Vec<PyEncoding> {
        self.encoding
            .get_overflowing()
            .clone()
            .into_iter()
            .map(|e| e.into())
            .collect()
    }

    /// Get the encoded tokens corresponding to the word at the given index
    /// in one of the input sequences.
    ///
    /// Args:
    ///     word_index (:obj:`int`):
    ///         The index of a word in one of the input sequences.
    ///     sequence_index (:obj:`int`, defaults to :obj:`0`):
    ///         The index of the sequence that contains the target word
    ///
    /// Returns:
    ///     :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
    #[pyo3(signature = (word_index, sequence_index = 0))]
    #[pyo3(text_signature = "(self, word_index, sequence_index=0)")]
    fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> {
        self.encoding.word_to_tokens(word_index, sequence_index)
    }

    /// Get the offsets of the word at the given index in one of the input sequences.
    ///
    /// Args:
    ///     word_index (:obj:`int`):
    ///         The index of a word in one of the input sequences.
    ///     sequence_index (:obj:`int`, defaults to :obj:`0`):
    ///         The index of the sequence that contains the target word
    ///
    /// Returns:
    ///     :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
    #[pyo3(signature = (word_index, sequence_index = 0))]
    #[pyo3(text_signature = "(self, word_index, sequence_index=0)")]
    fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> {
        self.encoding.word_to_chars(word_index, sequence_index)
    }

    /// Get the index of the sequence represented by the given token.
    ///
    /// In the general use case, this method returns :obj:`0` for a single sequence or
    /// the first sequence of a pair, and :obj:`1` for the second sequence of a pair
    ///
    /// Args:
    ///     token_index (:obj:`int`):
    ///         The index of a token in the encoded sequence.
    ///
    /// Returns:
    ///     :obj:`int`: The sequence id of the given token
    #[pyo3(text_signature = "(self, token_index)")]
    fn token_to_sequence(&self, token_index: usize) -> Option<usize> {
        self.encoding.token_to_sequence(token_index)
    }

    /// Get the offsets of the token at the given index.
    ///
    /// The returned offsets are related to the input sequence that contains the
    /// token. To determine which input sequence it belongs to, you
    /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
    ///
    /// Args:
    ///     token_index (:obj:`int`):
    ///         The index of a token in the encoded sequence.
    ///
    /// Returns:
    ///     :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
    #[pyo3(text_signature = "(self, token_index)")]
    fn token_to_chars(&self, token_index: usize) -> Option<Offsets> {
        let (_, offsets) = self.encoding.token_to_chars(token_index)?;
        Some(offsets)
    }

    /// Get the index of the word that contains the token in one of the input sequences.
    ///
    /// The returned word index is related to the input sequence that contains
    /// the token. To determine which input sequence it belongs to, you
    /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
    ///
    /// Args:
    ///     token_index (:obj:`int`):
    ///         The index of a token in the encoded sequence.
    ///
    /// Returns:
    ///     :obj:`int`: The index of the word in the relevant input sequence.
    #[pyo3(text_signature = "(self, token_index)")]
    fn token_to_word(&self, token_index: usize) -> Option<u32> {
        let (_, word_idx) = self.encoding.token_to_word(token_index)?;
        Some(word_idx)
    }

    /// Get the token that contains the char at the given position in the input sequence.
    ///
    /// Args:
    ///     char_pos (:obj:`int`):
    ///         The position of a char in the input string
    ///     sequence_index (:obj:`int`, defaults to :obj:`0`):
    ///         The index of the sequence that contains the target char
    ///
    /// Returns:
    ///     :obj:`int`: The index of the token that contains this char in the encoded sequence
    #[pyo3(signature = (char_pos, sequence_index = 0))]
    #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")]
    fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> {
        self.encoding.char_to_token(char_pos, sequence_index)
    }

    /// Get the word that contains the char at the given position in the input sequence.
    ///
    /// Args:
    ///     char_pos (:obj:`int`):
    ///         The position of a char in the input string
    ///     sequence_index (:obj:`int`, defaults to :obj:`0`):
    ///         The index of the sequence that contains the target char
    ///
    /// Returns:
    ///     :obj:`int`: The index of the word that contains this char in the input sequence
    #[pyo3(signature = (char_pos, sequence_index = 0))]
    #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")]
    fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> {
        self.encoding.char_to_word(char_pos, sequence_index)
    }

    /// Pad the :class:`~tokenizers.Encoding` at the given length
    ///
    /// Args:
    ///     length (:obj:`int`):
    ///         The desired length
    ///
    ///     direction (:obj:`str`, defaults to :obj:`right`):
    ///         The expected padding direction. Can be either :obj:`right` or :obj:`left`
    ///
    ///     pad_id (:obj:`int`, defaults to :obj:`0`):
    ///         The ID corresponding to the padding token
    ///
    ///     pad_type_id (:obj:`int`, defaults to :obj:`0`):
    ///         The type ID corresponding to the padding token
    ///
    ///     pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
    ///         The pad token to use
    #[pyo3(signature = (length, **kwargs))]
    #[pyo3(
        text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')"
    )]
    fn pad(&mut self, length: usize, kwargs: Option<&PyDict>) -> PyResult<()> {
        // Defaults mirrored in the text_signature above
        let mut pad_id = 0;
        let mut pad_type_id = 0;
        let mut pad_token = "[PAD]";
        let mut direction = PaddingDirection::Right;

        if let Some(kwargs) = kwargs {
            for (key, value) in kwargs {
                let key: &str = key.extract()?;
                match key {
                    "direction" => {
                        let value: &str = value.extract()?;
                        direction = match value {
                            "left" => Ok(PaddingDirection::Left),
                            "right" => Ok(PaddingDirection::Right),
                            other => Err(PyError(format!(
                                "Unknown `direction`: `{}`. Use \
                                 one of `left` or `right`",
                                other
                            ))
                            .into_pyerr::<exceptions::PyValueError>()),
                        }?;
                    }
                    "pad_id" => pad_id = value.extract()?,
                    "pad_type_id" => pad_type_id = value.extract()?,
                    "pad_token" => pad_token = value.extract()?,
                    _ => println!("Ignored unknown kwarg option {}", key),
                }
            }
        }

        self.encoding
            .pad(length, pad_id, pad_type_id, pad_token, direction);
        Ok(())
    }

    /// Truncate the :class:`~tokenizers.Encoding` at the given length
    ///
    /// If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
    /// this information is lost. It will be considered as representing a single sequence.
    ///
    /// Args:
    ///     max_length (:obj:`int`):
    ///         The desired length
    ///
    ///     stride (:obj:`int`, defaults to :obj:`0`):
    ///         The length of previous content to be included in each overflowing piece
    ///
    ///     direction (:obj:`str`, defaults to :obj:`right`):
    ///         Truncate direction
    #[pyo3(signature = (max_length, stride = 0, direction = "right"))]
    #[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")]
    fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> {
        let tdir = match direction {
            "left" => Ok(TruncationDirection::Left),
            "right" => Ok(TruncationDirection::Right),
            _ => Err(PyError(format!(
                "Invalid truncation direction value: {}",
                direction
            ))
            .into_pyerr::<exceptions::PyValueError>()),
        }?;

        self.encoding.truncate(max_length, stride, tdir);
        Ok(())
    }
}
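
// A minimal sketch (not part of the original bindings) of how `pad` and
// `truncate` above delegate to `tk::tokenizer::Encoding`. It assumes the
// upstream semantics described in the docstrings: padding grows an encoding
// up to `length`, and truncation caps it at `max_length`. The module and
// test names are illustrative only.
#[cfg(test)]
mod pad_truncate_sketch {
    use tokenizers as tk;

    use tk::tokenizer::{Encoding, PaddingDirection};
    use tk::utils::truncation::TruncationDirection;

    #[test]
    fn pad_then_truncate() {
        let mut encoding = Encoding::default();

        // Right-pad the empty encoding up to 4 tokens, using the same
        // defaults as `PyEncoding::pad`: pad_id=0, pad_type_id=0, "[PAD]".
        encoding.pad(4, 0, 0, "[PAD]", PaddingDirection::Right);
        assert_eq!(encoding.len(), 4);

        // Truncating below the current length keeps only the first 2 tokens.
        encoding.truncate(2, 0, TruncationDirection::Right);
        assert_eq!(encoding.len(), 2);
    }
}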
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/pre_tokenizers.rs
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizer::SplitDelimiterBehavior; use tk::pre_tokenizers::bert::BertPreTokenizer; use tk::pre_tokenizers::byte_level::ByteLevel; use tk::pre_tokenizers::delimiter::CharDelimiterSplit; use tk::pre_tokenizers::digits::Digits; use tk::pre_tokenizers::metaspace::{Metaspace, PrependScheme}; use tk::pre_tokenizers::punctuation::Punctuation; use tk::pre_tokenizers::split::Split; use tk::pre_tokenizers::unicode_scripts::UnicodeScripts; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::tokenizer::Offsets; use tk::{PreTokenizedString, PreTokenizer}; use tokenizers as tk; use super::error::ToPyResult; use super::utils::*; /// Base class for all pre-tokenizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// PreTokenizer will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.pre_tokenizers", name = "PreTokenizer", subclass )] #[derive(Clone, Serialize, Deserialize)] pub struct PyPreTokenizer { #[serde(flatten)] pub(crate) pretok: PyPreTokenizerTypeWrapper, } impl PyPreTokenizer { #[allow(dead_code)] pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self { PyPreTokenizer { pretok } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.pretok { PyPreTokenizerTypeWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PyPreTokenizerTypeWrapper::Single(ref inner) => { match &*inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyPreTokenizerWrapper::Wrapped(inner) => match inner { PreTokenizerWrapper::Whitespace(_) => { Py::new(py, (PyWhitespace {}, base))?.into_py(py) } PreTokenizerWrapper::Split(_) => { Py::new(py, (PySplit {}, base))?.into_py(py) } PreTokenizerWrapper::Punctuation(_) => { Py::new(py, (PyPunctuation {}, base))?.into_py(py) } PreTokenizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PreTokenizerWrapper::Metaspace(_) => { Py::new(py, (PyMetaspace {}, base))?.into_py(py) } PreTokenizerWrapper::Delimiter(_) => { Py::new(py, (PyCharDelimiterSplit {}, base))?.into_py(py) } PreTokenizerWrapper::WhitespaceSplit(_) => { Py::new(py, (PyWhitespaceSplit {}, base))?.into_py(py) } PreTokenizerWrapper::ByteLevel(_) => { Py::new(py, (PyByteLevel {}, base))?.into_py(py) } PreTokenizerWrapper::BertPreTokenizer(_) => { Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py) } PreTokenizerWrapper::Digits(_) => { Py::new(py, (PyDigits {}, base))?.into_py(py) } PreTokenizerWrapper::UnicodeScripts(_) => { Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py) } }, } } }) } } impl PreTokenizer for PyPreTokenizer { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> { self.pretok.pre_tokenize(normalized) } } #[pymethods] impl PyPreTokenizer { #[staticmethod] fn custom(pretok: PyObject) -> Self { PyPreTokenizer { pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.pretok).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PreTokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) 
    }

    fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
        match state.extract::<&PyBytes>(py) {
            Ok(s) => {
                let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| {
                    exceptions::PyException::new_err(format!(
                        "Error while attempting to unpickle PreTokenizer: {}",
                        e
                    ))
                })?;
                self.pretok = unpickled;
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    /// Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
    ///
    /// This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
    /// keep track of the pre-tokenization, and leverage the capabilities of the
    /// :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
    /// the pre-tokenization of a raw string, you can use
    /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
    ///
    /// Args:
    ///     pretok (:class:`~tokenizers.PreTokenizedString`):
    ///         The pre-tokenized string on which to apply this
    ///         :class:`~tokenizers.pre_tokenizers.PreTokenizer`
    #[pyo3(text_signature = "(self, pretok)")]
    fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> {
        ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into()
    }

    /// Pre-tokenize the given string
    ///
    /// This method provides a way to visualize the effect of a
    /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
    /// alignment, nor does it provide all the capabilities of the
    /// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
    /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
    ///
    /// Args:
    ///     sequence (:obj:`str`):
    ///         A string to pre-tokenize
    ///
    /// Returns:
    ///     :obj:`List[Tuple[str, Offsets]]`:
    ///         A list of tuples with the pre-tokenized parts and their offsets
    #[pyo3(text_signature = "(self, sequence)")]
    fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> {
        let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s);

        ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?;

        Ok(pretokenized
            .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char)
            .into_iter()
            .map(|(s, o, _)| (s.to_owned(), o))
            .collect())
    }
}

macro_rules! getter {
    ($self: ident, $variant: ident, $($name: tt)+) => {{
        let super_ = $self.as_ref();
        if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok {
            if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) =
                *single.read().unwrap()
            {
                pretok.$($name)+
            } else {
                unreachable!()
            }
        } else {
            unreachable!()
        }
    }};
}

macro_rules! setter {
    ($self: ident, $variant: ident, $name: ident, $value: expr) => {{
        let super_ = $self.as_ref();
        if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok {
            if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) =
                *single.write().unwrap()
            {
                pretok.$name = $value;
            }
        }
    }};
    ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{
        let super_ = $self.as_ref();
        if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok {
            if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) =
                *single.write().unwrap()
            {
                pretok.$name($value);
            }
        }
    }};
}

/// ByteLevel PreTokenizer
///
/// This pre-tokenizer takes care of replacing all bytes of the given string
/// with a corresponding representation, as well as splitting into words.
///
/// Args:
///     add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
///         Whether to add a space to the first word if there isn't already one. This
///         lets us treat `hello` exactly like `say hello`.
///     use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
///         Set this to :obj:`False` to prevent this `pre_tokenizer` from using
///         the GPT2 specific regexp for splitting on whitespace.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")]
pub struct PyByteLevel {}

#[pymethods]
impl PyByteLevel {
    #[getter]
    fn get_add_prefix_space(self_: PyRef<Self>) -> bool {
        getter!(self_, ByteLevel, add_prefix_space)
    }

    #[setter]
    fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) {
        setter!(self_, ByteLevel, add_prefix_space, add_prefix_space);
    }

    #[getter]
    fn get_use_regex(self_: PyRef<Self>) -> bool {
        getter!(self_, ByteLevel, use_regex)
    }

    #[setter]
    fn set_use_regex(self_: PyRef<Self>, use_regex: bool) {
        setter!(self_, ByteLevel, use_regex, use_regex);
    }

    #[new]
    #[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs), text_signature = "(self, add_prefix_space=True, use_regex=True)")]
    fn new(
        add_prefix_space: bool,
        use_regex: bool,
        _kwargs: Option<&PyDict>,
    ) -> (Self, PyPreTokenizer) {
        (
            PyByteLevel {},
            ByteLevel::default()
                .add_prefix_space(add_prefix_space)
                .use_regex(use_regex)
                .into(),
        )
    }

    /// Returns the alphabet used by this PreTokenizer.
    ///
    /// Since the ByteLevel works as its name suggests, at the byte level, it
    /// encodes each byte value to a unique visible character. This means that there is a
    /// total of 256 different characters composing this alphabet.
    ///
    /// Returns:
    ///     :obj:`List[str]`: A list of characters that compose the alphabet
    #[staticmethod]
    #[pyo3(text_signature = "()")]
    fn alphabet() -> Vec<String> {
        ByteLevel::alphabet()
            .into_iter()
            .map(|c| c.to_string())
            .collect()
    }
}

/// This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")]
pub struct PyWhitespace {}

#[pymethods]
impl PyWhitespace {
    #[new]
    #[pyo3(text_signature = "(self)")]
    fn new() -> (Self, PyPreTokenizer) {
        (PyWhitespace {}, Whitespace {}.into())
    }
}

/// This pre-tokenizer simply splits on whitespace. Works like `.split()`
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")]
pub struct PyWhitespaceSplit {}

#[pymethods]
impl PyWhitespaceSplit {
    #[new]
    #[pyo3(text_signature = "(self)")]
    fn new() -> (Self, PyPreTokenizer) {
        (PyWhitespaceSplit {}, WhitespaceSplit.into())
    }
}

/// Split PreTokenizer
///
/// This versatile pre-tokenizer splits using the provided pattern and
/// according to the provided behavior. The pattern can be inverted by
/// making use of the invert flag.
///
/// Args:
///     pattern (:obj:`str` or :class:`~tokenizers.Regex`):
///         A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
///
///     behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
///         The behavior to use when splitting.
///         Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
///         "contiguous"
///
///     invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
///         Whether to invert the pattern.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")]
pub struct PySplit {}

#[pymethods]
impl PySplit {
    #[new]
    #[pyo3(signature = (pattern, behavior, invert = false), text_signature = "(self, pattern, behavior, invert=False)")]
    fn new(
        pattern: PyPattern,
        behavior: PySplitDelimiterBehavior,
        invert: bool,
    ) -> PyResult<(Self, PyPreTokenizer)> {
        Ok((
            PySplit {},
            ToPyResult(Split::new(pattern, behavior.into(), invert))
                .into_py()?
                .into(),
        ))
    }

    fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple {
        PyTuple::new(py, [" ", "removed"])
    }
}

/// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
///
/// Args:
///     delimiter (:obj:`str`):
///         The delimiter char that will be used to split input
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")]
pub struct PyCharDelimiterSplit {}

#[pymethods]
impl PyCharDelimiterSplit {
    #[getter]
    fn get_delimiter(self_: PyRef<Self>) -> String {
        getter!(self_, Delimiter, delimiter.to_string())
    }

    #[setter]
    fn set_delimiter(self_: PyRef<Self>, delimiter: PyChar) {
        setter!(self_, Delimiter, delimiter, delimiter.0);
    }

    #[new]
    #[pyo3(text_signature = None)]
    pub fn new(delimiter: PyChar) -> PyResult<(Self, PyPreTokenizer)> {
        Ok((
            PyCharDelimiterSplit {},
            CharDelimiterSplit::new(delimiter.0).into(),
        ))
    }

    fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple {
        PyTuple::new(py, [" "])
    }
}

/// BertPreTokenizer
///
/// This pre-tokenizer splits tokens on spaces, and also on punctuation.
/// Each occurrence of a punctuation character will be treated separately.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")]
pub struct PyBertPreTokenizer {}

#[pymethods]
impl PyBertPreTokenizer {
    #[new]
    #[pyo3(text_signature = "(self)")]
    fn new() -> (Self, PyPreTokenizer) {
        (PyBertPreTokenizer {}, BertPreTokenizer.into())
    }
}

/// This pre-tokenizer simply splits on punctuation as individual characters.
///
/// Args:
///     behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
///         The behavior to use when splitting.
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", /// "contiguous" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")] pub struct PyPunctuation {} #[pymethods] impl PyPunctuation { #[new] #[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)), text_signature = "(self, behavior=\"isolated\")")] fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) { (PyPunctuation {}, Punctuation::new(behavior.into()).into()) } } /// This pre-tokenizer composes other pre_tokenizers and applies them in sequence #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = "(self, pretokenizers)")] fn new(pre_tokenizers: &PyList) -> PyResult<(Self, PyPreTokenizer)> { let mut sequence = Vec::with_capacity(pre_tokenizers.len()); for n in pre_tokenizers.iter() { let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?; match &pretokenizer.pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => { sequence.extend(inner.iter().cloned()) } PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } fn from_string(string: String) -> Result<PrependScheme, PyErr> { let scheme = match string.as_str() { "first" => PrependScheme::First, "never" => PrependScheme::Never, "always" => PrependScheme::Always, _ => { return Err(exceptions::PyValueError::new_err(format!( "{} is an unknown variant, should be one of ['first', 'never', 'always']", string ))); } }; Ok(scheme) } /// Metaspace pre-tokenizer /// /// This pre-tokenizer replaces any whitespace by the provided replacement character. /// It then tries to split on these spaces. /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")]
pub struct PyMetaspace {}

#[pymethods]
impl PyMetaspace {
    #[getter]
    fn get_replacement(self_: PyRef<Self>) -> String {
        getter!(self_, Metaspace, get_replacement().to_string())
    }

    #[setter]
    fn set_replacement(self_: PyRef<Self>, replacement: PyChar) {
        setter!(self_, Metaspace, @set_replacement, replacement.0);
    }

    #[getter]
    fn get_add_prefix_space(self_: PyRef<Self>) -> bool {
        getter!(self_, Metaspace, add_prefix_space)
    }

    #[setter]
    fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) {
        setter!(self_, Metaspace, add_prefix_space, add_prefix_space);
    }

    #[getter]
    fn get_prepend_scheme(self_: PyRef<Self>) -> String {
        // Convert the underlying `PrependScheme` enum to its string representation
        let scheme: PrependScheme = getter!(self_, Metaspace, get_prepend_scheme());
        match scheme {
            PrependScheme::First => "first",
            PrependScheme::Never => "never",
            PrependScheme::Always => "always",
        }
        .to_string()
    }

    #[setter]
    fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> {
        let scheme = from_string(prepend_scheme)?;
        setter!(self_, Metaspace, @set_prepend_scheme, scheme);
        Ok(())
    }

    #[new]
    #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true, prepend_scheme=None, **_kwargs), text_signature = "(self, replacement=\"▁\", add_prefix_space=True, prepend_scheme=None)")]
    fn new(
        replacement: PyChar,
        add_prefix_space: bool,
        prepend_scheme: Option<String>,
        _kwargs: Option<&PyDict>,
    ) -> PyResult<(Self, PyPreTokenizer)> {
        // Create a new Metaspace instance
        let mut new_instance: Metaspace = Metaspace::new(replacement.0, add_prefix_space);

        // If a prepend scheme is provided, set it
        if let Some(prepend_scheme) = prepend_scheme {
            new_instance.set_prepend_scheme(from_string(prepend_scheme)?);
        }

        Ok((PyMetaspace {}, new_instance.into()))
    }
}

/// This pre-tokenizer splits digits into separate tokens
///
/// Args:
///     individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
///         If set to True, digits will each be separated as follows::
///
///             "Call 123 please" -> "Call ", "1", "2", "3", " please"
///
///         If set to False, digits will be grouped as follows::
///
///             "Call 123 please" -> "Call ", "123", " please"
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Digits")]
pub struct PyDigits {}

#[pymethods]
impl PyDigits {
    #[getter]
    fn get_individual_digits(self_: PyRef<Self>) -> bool {
        getter!(self_, Digits, individual_digits)
    }

    #[setter]
    fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) {
        setter!(self_, Digits, individual_digits, individual_digits);
    }

    #[new]
    #[pyo3(signature = (individual_digits = false), text_signature = "(self, individual_digits=False)")]
    fn new(individual_digits: bool) -> (Self, PyPreTokenizer) {
        (PyDigits {}, Digits::new(individual_digits).into())
    }
}

/// This pre-tokenizer splits on characters that belong to different language families
/// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
/// In practice, Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
/// This mimics the SentencePiece Unigram implementation.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")] pub struct PyUnicodeScripts {} #[pymethods] impl PyUnicodeScripts { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyUnicodeScripts {}, UnicodeScripts::new().into()) } } #[derive(Clone)] pub(crate) struct CustomPreTokenizer { inner: PyObject, } impl CustomPreTokenizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::PreTokenizer for CustomPreTokenizer { fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> { Python::with_gil(|py| { let pretok = PyPreTokenizedStringRefMut::new(sentence); let py_pretok = self.inner.as_ref(py); py_pretok.call_method("pre_tokenize", (pretok.get(),), None)?; Ok(()) }) } } impl Serialize for CustomPreTokenizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PreTokenizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomPreTokenizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom PreTokenizer cannot be deserialized", )) } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerWrapper { Custom(CustomPreTokenizer), Wrapped(PreTokenizerWrapper), } impl Serialize for PyPreTokenizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>), Single(Arc<RwLock<PyPreTokenizerWrapper>>), } impl Serialize for PyPreTokenizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPreTokenizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("pretokenizers", seq)?; ser.end() } PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPreTokenizerWrapper where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerWrapper::Wrapped(pretok.into()) } } impl<I> From<I> for PyPreTokenizerTypeWrapper where I: Into<PyPreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok.into()))) } } impl<I> From<I> for PyPreTokenizer where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizer { pretok: pretok.into().into(), } } } impl PreTokenizer for PyPreTokenizerTypeWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerTypeWrapper::Single(inner) => inner.read().unwrap().pre_tokenize(pretok), PyPreTokenizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().pre_tokenize(pretok)), } } } impl PreTokenizer for PyPreTokenizerWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok), PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok), } } } /// PreTokenizers Module #[pymodule] pub fn pre_tokenizers(_py: Python, m: &PyModule) -> 
PyResult<()> { m.add_class::<PyPreTokenizer>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyWhitespace>()?; m.add_class::<PyWhitespaceSplit>()?; m.add_class::<PySplit>()?; m.add_class::<PyBertPreTokenizer>()?; m.add_class::<PyMetaspace>()?; m.add_class::<PyCharDelimiterSplit>()?; m.add_class::<PyPunctuation>()?; m.add_class::<PySequence>()?; m.add_class::<PyDigits>()?; m.add_class::<PyUnicodeScripts>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::pre_tokenizers::sequence::Sequence; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use crate::pre_tokenizers::{ CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper, }; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyPreTokenizer::new(Whitespace {}.into()); let py_wsp = py_norm.get_as_subtype(py).unwrap(); assert_eq!("Whitespace", py_wsp.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {}); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap(); match py_pretok.pretok { PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {} _ => panic!("Expected Whitespace"), }, _ => panic!("Expected wrapped, not custom."), } let py_seq: PyPreTokenizerWrapper = Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![ Whitespace {}.into(), WhitespaceSplit.into(), ])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyPreTokenizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let obj = Python::with_gil(|py| { let py_wsp = PyPreTokenizer::new(Whitespace {}.into()); let obj: PyObject = Py::new(py, py_wsp).unwrap().into_py(py); obj }); let py_seq: PyPreTokenizerWrapper = PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj)); assert!(serde_json::to_string(&py_seq).is_err()); } }
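
// A minimal usage sketch (not part of the original bindings) of the
// `Whitespace` pre-tokenizer wrapped above, mirroring what
// `PyPreTokenizer::pre_tokenize_str` does on the Rust side. The expected
// splits assume the `\w+|[^\w\s]+` behavior stated in the `PyWhitespace`
// docstring; the module and test names are illustrative only.
#[cfg(test)]
mod whitespace_usage_sketch {
    use tokenizers as tk;

    use tk::pre_tokenizers::whitespace::Whitespace;
    use tk::{PreTokenizedString, PreTokenizer};

    #[test]
    fn splits_words_and_punctuation() {
        let pretok = Whitespace {};
        let mut pretokenized = PreTokenizedString::from("Hello, world!");
        pretok.pre_tokenize(&mut pretokenized).unwrap();

        // Collect the splits with character offsets, exactly like
        // `pre_tokenize_str` does for the Python API.
        let splits: Vec<(String, (usize, usize))> = pretokenized
            .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char)
            .into_iter()
            .map(|(s, o, _)| (s.to_owned(), o))
            .collect();

        assert_eq!(
            splits,
            vec![
                ("Hello".into(), (0, 5)),
                (",".into(), (5, 6)),
                ("world".into(), (7, 12)),
                ("!".into(), (12, 13)),
            ]
        );
    }
}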
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/tokenizer.rs
use std::collections::{hash_map::DefaultHasher, HashMap};
use std::hash::{Hash, Hasher};

use numpy::{npyffi, PyArray1};
use pyo3::class::basic::CompareOp;
use pyo3::exceptions;
use pyo3::intern;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::models::bpe::BPE;
use tk::tokenizer::{
    Model, PaddingDirection, PaddingParams, PaddingStrategy, PostProcessor, TokenizerImpl,
    TruncationDirection, TruncationParams, TruncationStrategy,
};
use tk::utils::iter::ResultShunt;
use tokenizers as tk;

use super::decoders::PyDecoder;
use super::encoding::PyEncoding;
use super::error::{PyError, ToPyResult};
use super::models::PyModel;
use super::normalizers::PyNormalizer;
use super::pre_tokenizers::PyPreTokenizer;
use super::trainers::PyTrainer;
use crate::processors::PyPostProcessor;
use crate::utils::{MaybeSizedIterator, PyBufferedIterator};
use std::collections::BTreeMap;

/// Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
/// It can have special options that define the way it should behave.
///
/// Args:
///     content (:obj:`str`): The content of the token
///
///     single_word (:obj:`bool`, defaults to :obj:`False`):
///         Defines whether this token should only match single words. If :obj:`True`, this
///         token will never match inside of a word. For example the token ``ing`` would match
///         on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
///         The notion of "`inside of a word`" is defined by the word boundaries pattern in
///         regular expressions (i.e. the token should start and end with word boundaries).
///
///     lstrip (:obj:`bool`, defaults to :obj:`False`):
///         Defines whether this token should strip all potential whitespaces on its left side.
///         If :obj:`True`, this token will greedily match any whitespace on its left. For
///         example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
///         ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
///
///     rstrip (:obj:`bool`, defaults to :obj:`False`):
///         Defines whether this token should strip all potential whitespaces on its right
///         side. If :obj:`True`, this token will greedily match any whitespace on its right.
///         It works just like :obj:`lstrip` but on the right.
///
///     normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
///         Defines whether this token should match against the normalized version of the input
///         text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
///         lowercasing the text, the token could be extracted from the input ``"I saw a lion
///         Yesterday"``.
///     special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`True` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
///         Defines whether this token should be skipped when decoding.
/// #[pyclass(dict, module = "tokenizers", name = "AddedToken")] pub struct PyAddedToken { pub content: String, pub special: bool, pub single_word: Option<bool>, pub lstrip: Option<bool>, pub rstrip: Option<bool>, pub normalized: Option<bool>, } impl PyAddedToken { pub fn from<S: Into<String>>(content: S, special: Option<bool>) -> Self { Self { content: content.into(), special: special.unwrap_or(false), single_word: None, lstrip: None, rstrip: None, normalized: None, } } pub fn get_token(&self) -> tk::tokenizer::AddedToken { let mut token = tk::AddedToken::from(&self.content, self.special); if let Some(sw) = self.single_word { token = token.single_word(sw); } if let Some(ls) = self.lstrip { token = token.lstrip(ls); } if let Some(rs) = self.rstrip { token = token.rstrip(rs); } if let Some(n) = self.normalized { token = token.normalized(n); } token } pub fn as_pydict<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { let dict = PyDict::new(py); let token = self.get_token(); dict.set_item("content", token.content)?; dict.set_item("single_word", token.single_word)?; dict.set_item("lstrip", token.lstrip)?; dict.set_item("rstrip", token.rstrip)?; dict.set_item("normalized", token.normalized)?; dict.set_item("special", token.special)?; Ok(dict) } } impl From<tk::AddedToken> for PyAddedToken { fn from(token: tk::AddedToken) -> Self { Self { content: token.content, single_word: Some(token.single_word), lstrip: Some(token.lstrip), rstrip: Some(token.rstrip), normalized: Some(token.normalized), special: token.special, } } } #[pymethods] impl PyAddedToken { #[new] #[pyo3(signature = (content=None, **kwargs), text_signature = "(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False)")] fn __new__(content: Option<&str>, kwargs: Option<&PyDict>) -> PyResult<Self> { let mut token = PyAddedToken::from(content.unwrap_or(""), None); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "single_word" => token.single_word = Some(value.extract()?), "lstrip" => token.lstrip = Some(value.extract()?), "rstrip" => token.rstrip = Some(value.extract()?), "normalized" => token.normalized = Some(value.extract()?), "special" => token.special = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } Ok(token) } fn __getstate__<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { self.as_pydict(py) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyDict>(py) { Ok(state) => { for (key, value) in state { let key: &str = key.extract()?; match key { "content" => self.content = value.extract()?, "single_word" => self.single_word = Some(value.extract()?), "lstrip" => self.lstrip = Some(value.extract()?), "rstrip" => self.rstrip = Some(value.extract()?), "normalized" => self.normalized = Some(value.extract()?), "special" => self.special = value.extract()?, _ => {} } } Ok(()) } Err(e) => Err(e), } } /// Get the content of this :obj:`AddedToken` #[getter] fn get_content(&self) -> &str { &self.content } /// Set the content of this :obj:`AddedToken` #[setter] fn set_content(&mut self, content: String) { self.content = content; } /// Get the value of the :obj:`rstrip` option #[getter] fn get_rstrip(&self) -> bool { self.get_token().rstrip } /// Get the value of the :obj:`lstrip` option #[getter] fn get_lstrip(&self) -> bool { self.get_token().lstrip } /// Get the value of the :obj:`single_word` option #[getter] fn get_single_word(&self) -> bool { 
self.get_token().single_word } /// Get the value of the :obj:`normalized` option #[getter] fn get_normalized(&self) -> bool { self.get_token().normalized } /// Get the value of the :obj:`special` option #[getter] fn get_special(&self) -> bool { self.get_token().special } /// Set the value of the :obj:`special` option #[setter] fn set_special(&mut self, special: bool) { self.special = special; } fn __str__(&self) -> PyResult<&str> { Ok(&self.content) } fn __repr__(&self) -> PyResult<String> { let bool_to_python = |p| match p { true => "True", false => "False", }; let token = self.get_token(); Ok(format!( "AddedToken(\"{}\", rstrip={}, lstrip={}, single_word={}, normalized={}, special={})", self.content, bool_to_python(token.rstrip), bool_to_python(token.lstrip), bool_to_python(token.single_word), bool_to_python(token.normalized), bool_to_python(token.special) )) } fn __richcmp__(&self, other: Py<PyAddedToken>, op: CompareOp) -> bool { use CompareOp::*; Python::with_gil(|py| match op { Lt | Le | Gt | Ge => false, Eq => self.get_token() == other.borrow(py).get_token(), Ne => self.get_token() != other.borrow(py).get_token(), }) } fn __hash__(&self) -> u64 { let mut hasher = DefaultHasher::new(); self.get_token().hash(&mut hasher); hasher.finish() } } struct TextInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for TextInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { let err = exceptions::PyTypeError::new_err("TextInputSequence must be str"); if let Ok(s) = ob.downcast::<PyString>() { Ok(Self(s.to_string_lossy().into())) } else { Err(err) } } } impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: TextInputSequence<'s>) -> Self { s.0 } } struct PyArrayUnicode(Vec<String>); impl FromPyObject<'_> for PyArrayUnicode { fn extract(ob: &PyAny) -> PyResult<Self> { // SAFETY Making sure the pointer is a valid numpy array requires calling numpy C code if unsafe { npyffi::PyArray_Check(ob.py(), ob.as_ptr()) } == 0 { return Err(exceptions::PyTypeError::new_err("Expected an np.array")); } let arr = ob.as_ptr() as *mut npyffi::PyArrayObject; // SAFETY Getting all the metadata about the numpy array to check its sanity let (type_num, elsize, alignment, data, nd, flags) = unsafe { let desc = (*arr).descr; ( (*desc).type_num, (*desc).elsize as usize, (*desc).alignment as usize, (*arr).data, (*arr).nd, (*arr).flags, ) }; if nd != 1 { return Err(exceptions::PyTypeError::new_err( "Expected a 1 dimensional np.array", )); } if flags & (npyffi::NPY_ARRAY_C_CONTIGUOUS | npyffi::NPY_ARRAY_F_CONTIGUOUS) == 0 { return Err(exceptions::PyTypeError::new_err( "Expected a contiguous np.array", )); } if type_num != npyffi::types::NPY_TYPES::NPY_UNICODE as i32 { return Err(exceptions::PyTypeError::new_err( "Expected a np.array[dtype='U']", )); } // SAFETY Looking at the raw numpy data to create new owned Rust strings via copies (so it's safe afterwards). 
unsafe { let n_elem = *(*arr).dimensions as usize; let all_bytes = std::slice::from_raw_parts(data as *const u8, elsize * n_elem); let seq = (0..n_elem) .map(|i| { let bytes = &all_bytes[i * elsize..(i + 1) * elsize]; let unicode = pyo3::ffi::PyUnicode_FromKindAndData( pyo3::ffi::PyUnicode_4BYTE_KIND as _, bytes.as_ptr() as *const _, elsize as isize / alignment as isize, ); let py = ob.py(); let obj = PyObject::from_owned_ptr(py, unicode); let s = obj.downcast::<PyString>(py)?; Ok(s.to_string_lossy().trim_matches(char::from(0)).to_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } } impl From<PyArrayUnicode> for tk::InputSequence<'_> { fn from(s: PyArrayUnicode) -> Self { s.0.into() } } struct PyArrayStr(Vec<String>); impl FromPyObject<'_> for PyArrayStr { fn extract(ob: &PyAny) -> PyResult<Self> { let array = ob.downcast::<PyArray1<PyObject>>()?; let seq = array .readonly() .as_array() .iter() .map(|obj| { let s = obj.downcast::<PyString>(ob.py())?; Ok(s.to_string_lossy().into_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } impl From<PyArrayStr> for tk::InputSequence<'_> { fn from(s: PyArrayStr) -> Self { s.0.into() } } struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for PreTokenizedInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(seq) = ob.extract::<PyArrayUnicode>() { return Ok(Self(seq.into())); } if let Ok(seq) = ob.extract::<PyArrayStr>() { return Ok(Self(seq.into())); } if let Ok(s) = ob.downcast::<PyList>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } if let Ok(s) = ob.downcast::<PyTuple>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedInputSequence must be Union[List[str], Tuple[str]]", )) } } impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: PreTokenizedInputSequence<'s>) -> Self { s.0 } } struct TextEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for TextEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<TextInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(TextInputSequence, TextInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<TextInputSequence>()?; let second = arr[1].extract::<TextInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]", )) } } impl<'s> From<TextEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: TextEncodeInput<'s>) -> Self { i.0 } } struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for PreTokenizedEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<PreTokenizedInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(PreTokenizedInputSequence, PreTokenizedInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<PreTokenizedInputSequence>()?; let second = arr[1].extract::<PreTokenizedInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedEncodeInput must be Union[PreTokenizedInputSequence, \ Tuple[PreTokenizedInputSequence, 
PreTokenizedInputSequence]]", )) } } impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: PreTokenizedEncodeInput<'s>) -> Self { i.0 } } type Tokenizer = TokenizerImpl<PyModel, PyNormalizer, PyPreTokenizer, PyPostProcessor, PyDecoder>; /// A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input /// and outputs an :class:`~tokenizers.Encoding`. /// /// Args: /// model (:class:`~tokenizers.models.Model`): /// The core algorithm that this :obj:`Tokenizer` should be using. /// #[pyclass(dict, module = "tokenizers", name = "Tokenizer")] #[derive(Clone)] pub struct PyTokenizer { tokenizer: Tokenizer, } impl PyTokenizer { fn new(tokenizer: Tokenizer) -> Self { PyTokenizer { tokenizer } } fn from_model(model: PyModel) -> Self { PyTokenizer::new(TokenizerImpl::new(model)) } } #[pymethods] impl PyTokenizer { #[new] #[pyo3(text_signature = "(self, model)")] fn __new__(model: PyRef<PyModel>) -> Self { PyTokenizer::from_model(model.clone()) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.tokenizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Tokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.tokenizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Tokenizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { let model = PyModel::from(BPE::default()).into_py(py); PyTuple::new(py, vec![model]) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. /// /// Args: /// json (:obj:`str`): /// A valid JSON string representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(json)")] fn from_str(json: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(json.parse()).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a local JSON file representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(path)")] fn from_file(path: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. /// /// Args: /// buffer (:obj:`bytes`): /// A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(buffer)")] fn from_buffer(buffer: &PyBytes) -> PyResult<Self> { let tokenizer = serde_json::from_slice(buffer.as_bytes()).map_err(|e| { exceptions::PyValueError::new_err(format!( "Cannot instantiate Tokenizer from buffer: {}", e )) })?; Ok(Self { tokenizer }) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the /// Hugging Face Hub. 
/// /// Args: /// identifier (:obj:`str`): /// The identifier of a Model on the Hugging Face Hub, that contains /// a tokenizer.json file /// revision (:obj:`str`, defaults to `main`): /// A branch or commit id /// auth_token (:obj:`str`, `optional`, defaults to `None`): /// An optional auth token used to access private repositories on the /// Hugging Face Hub /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(signature = (identifier, revision = String::from("main"), auth_token = None))] #[pyo3(text_signature = "(identifier, revision=\"main\", auth_token=None)")] fn from_pretrained( identifier: &str, revision: String, auth_token: Option<String>, ) -> PyResult<Self> { let path = Python::with_gil(|py| -> PyResult<String> { let huggingface_hub = PyModule::import(py, intern!(py, "huggingface_hub"))?; let hf_hub_download = huggingface_hub.getattr(intern!(py, "hf_hub_download"))?; let kwargs = [ (intern!(py, "repo_id"), identifier), (intern!(py, "filename"), "tokenizer.json"), (intern!(py, "revision"), &revision), ] .into_py_dict(py); if let Some(auth_token) = auth_token { kwargs.set_item(intern!(py, "token"), auth_token)?; } let path: String = hf_hub_download.call((), Some(kwargs))?.extract()?; Ok(path) })?; let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. /// /// Args: /// pretty (:obj:`bool`, defaults to :obj:`False`): /// Whether the JSON string should be pretty formatted. /// /// Returns: /// :obj:`str`: A string representing the serialized Tokenizer #[pyo3(signature = (pretty = false))] #[pyo3(text_signature = "(self, pretty=False)")] fn to_str(&self, pretty: bool) -> PyResult<String> { ToPyResult(self.tokenizer.to_string(pretty)).into() } /// Save the :class:`~tokenizers.Tokenizer` to the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a file in which to save the serialized tokenizer. /// /// pretty (:obj:`bool`, defaults to :obj:`True`): /// Whether the JSON file should be pretty formatted. #[pyo3(signature = (path, pretty = true))] #[pyo3(text_signature = "(self, path, pretty=True)")] fn save(&self, path: &str, pretty: bool) -> PyResult<()> { ToPyResult(self.tokenizer.save(path, pretty)).into() } /// Return the number of special tokens that would be added for single/pair sentences. 
    ///
    /// Args:
    ///     is_pair (:obj:`bool`):
    ///         Whether the input would be a pair of sequences
    ///
    /// Returns:
    ///     :obj:`int`: The number of special tokens that would be added
    #[pyo3(text_signature = "(self, is_pair)")]
    fn num_special_tokens_to_add(&self, is_pair: bool) -> usize {
        self.tokenizer
            .get_post_processor()
            .map_or(0, |p| p.added_tokens(is_pair))
    }

    /// Get the underlying vocabulary
    ///
    /// Args:
    ///     with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
    ///         Whether to include the added tokens
    ///
    /// Returns:
    ///     :obj:`Dict[str, int]`: The vocabulary
    #[pyo3(signature = (with_added_tokens = true))]
    #[pyo3(text_signature = "(self, with_added_tokens=True)")]
    fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> {
        self.tokenizer.get_vocab(with_added_tokens)
    }

    /// Get the underlying vocabulary of added tokens
    ///
    /// Returns:
    ///     :obj:`Dict[int, AddedToken]`: The vocabulary
    #[pyo3(signature = ())]
    #[pyo3(text_signature = "(self)")]
    fn get_added_tokens_decoder(&self) -> BTreeMap<u32, PyAddedToken> {
        let mut sorted_map = BTreeMap::new();

        for (key, value) in self.tokenizer.get_added_tokens_decoder() {
            sorted_map.insert(key, value.into());
        }
        sorted_map
    }

    /// Get the size of the underlying vocabulary
    ///
    /// Args:
    ///     with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
    ///         Whether to include the added tokens
    ///
    /// Returns:
    ///     :obj:`int`: The size of the vocabulary
    #[pyo3(signature = (with_added_tokens = true))]
    #[pyo3(text_signature = "(self, with_added_tokens=True)")]
    fn get_vocab_size(&self, with_added_tokens: bool) -> usize {
        self.tokenizer.get_vocab_size(with_added_tokens)
    }

    /// Enable truncation
    ///
    /// Args:
    ///     max_length (:obj:`int`):
    ///         The max length at which to truncate
    ///
    ///     stride (:obj:`int`, `optional`):
    ///         The length of previous content to be included in each overflowing sequence
    ///
    ///     strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
    ///         The strategy to use for truncation. Can be one of ``longest_first``, ``only_first`` or
    ///         ``only_second``.
    ///
    ///     direction (:obj:`str`, defaults to :obj:`right`):
    ///         Truncate direction
    #[pyo3(signature = (max_length, **kwargs))]
    #[pyo3(
        text_signature = "(self, max_length, stride=0, strategy='longest_first', direction='right')"
    )]
    fn enable_truncation(&mut self, max_length: usize, kwargs: Option<&PyDict>) -> PyResult<()> {
        let mut params = TruncationParams {
            max_length,
            ..Default::default()
        };

        if let Some(kwargs) = kwargs {
            for (key, value) in kwargs {
                let key: &str = key.extract()?;
                match key {
                    "stride" => params.stride = value.extract()?,
                    "strategy" => {
                        let value: &str = value.extract()?;
                        params.strategy = match value {
                            "longest_first" => Ok(TruncationStrategy::LongestFirst),
                            "only_first" => Ok(TruncationStrategy::OnlyFirst),
                            "only_second" => Ok(TruncationStrategy::OnlySecond),
                            _ => Err(PyError(format!(
                                "Unknown `strategy`: `{}`. Use \
                                 one of `longest_first`, `only_first`, or `only_second`",
                                value
                            ))
                            .into_pyerr::<exceptions::PyValueError>()),
                        }?
                    }
                    "direction" => {
                        let value: &str = value.extract()?;
                        params.direction = match value {
                            "left" => Ok(TruncationDirection::Left),
                            "right" => Ok(TruncationDirection::Right),
                            _ => Err(PyError(format!(
                                "Unknown `direction`: `{}`. Use \
                                 one of `left` or `right`.",
                                value
                            ))
                            .into_pyerr::<exceptions::PyValueError>()),
                        }?
                    }
                    _ => println!("Ignored unknown kwarg option {}", key),
                }
            }
        }

        if let Err(error_message) = self.tokenizer.with_truncation(Some(params)) {
            return Err(PyError(error_message.to_string()).into_pyerr::<exceptions::PyValueError>());
        }
        Ok(())
    }

    /// Disable truncation
    #[pyo3(text_signature = "(self)")]
    fn no_truncation(&mut self) {
        self.tokenizer
            .with_truncation(None)
            .expect("Failed to set truncation to `None`! This should never happen");
    }

    /// Get the currently set truncation parameters
    ///
    /// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
    ///
    /// Returns:
    ///     (:obj:`dict`, `optional`):
    ///         A dict with the current truncation parameters if truncation is enabled
    #[getter]
    fn get_truncation<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> {
        self.tokenizer.get_truncation().map_or(Ok(None), |params| {
            let dict = PyDict::new(py);

            dict.set_item("max_length", params.max_length)?;
            dict.set_item("stride", params.stride)?;
            dict.set_item("strategy", params.strategy.as_ref())?;
            dict.set_item("direction", params.direction.as_ref())?;

            Ok(Some(dict))
        })
    }

    /// Enable padding
    ///
    /// Args:
    ///     direction (:obj:`str`, `optional`, defaults to :obj:`right`):
    ///         The direction in which to pad. Can be either ``right`` or ``left``
    ///
    ///     pad_to_multiple_of (:obj:`int`, `optional`):
    ///         If specified, the padding length should always snap to the next multiple of the
    ///         given value. For example if we were going to pad with a length of 250 but
    ///         ``pad_to_multiple_of=8`` then we will pad to 256.
    ///
    ///     pad_id (:obj:`int`, defaults to 0):
    ///         The id to be used when padding
    ///
    ///     pad_type_id (:obj:`int`, defaults to 0):
    ///         The type id to be used when padding
    ///
    ///     pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
    ///         The pad token to be used when padding
    ///
    ///     length (:obj:`int`, `optional`):
    ///         If specified, the length at which to pad. If not specified we pad using the size of
    ///         the longest sequence in a batch.
    #[pyo3(signature = (**kwargs))]
    #[pyo3(
        text_signature = "(self, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]', length=None, pad_to_multiple_of=None)"
    )]
    fn enable_padding(&mut self, kwargs: Option<&PyDict>) -> PyResult<()> {
        let mut params = PaddingParams::default();

        if let Some(kwargs) = kwargs {
            for (key, value) in kwargs {
                let key: &str = key.extract()?;
                match key {
                    "direction" => {
                        let value: &str = value.extract()?;
                        params.direction = match value {
                            "left" => Ok(PaddingDirection::Left),
                            "right" => Ok(PaddingDirection::Right),
                            other => Err(PyError(format!(
                                "Unknown `direction`: `{}`. Use \
                                 one of `left` or `right`",
                                other
                            ))
                            .into_pyerr::<exceptions::PyValueError>()),
                        }?;
                    }
                    "pad_to_multiple_of" => {
                        if let Some(multiple) = value.extract()? {
                            params.pad_to_multiple_of = multiple;
                        }
                    }
                    "pad_id" => params.pad_id = value.extract()?,
                    "pad_type_id" => params.pad_type_id = value.extract()?,
                    "pad_token" => params.pad_token = value.extract()?,
                    "max_length" => {
                        println!(
                            "enable_padding(max_length=X) is deprecated, \
                             use enable_padding(length=X) instead"
                        );
                        if let Some(l) = value.extract()? {
                            params.strategy = PaddingStrategy::Fixed(l);
                        } else {
                            params.strategy = PaddingStrategy::BatchLongest;
                        }
                    }
                    "length" => {
                        if let Some(l) = value.extract()?
{ params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } _ => println!("Ignored unknown kwarg option {}", key), } } } self.tokenizer.with_padding(Some(params)); Ok(()) } /// Disable padding #[pyo3(text_signature = "(self)")] fn no_padding(&mut self) { self.tokenizer.with_padding(None); } /// Get the current padding parameters /// /// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current padding parameters if padding is enabled #[getter] fn get_padding<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_padding().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item( "length", match params.strategy { tk::PaddingStrategy::BatchLongest => None, tk::PaddingStrategy::Fixed(size) => Some(size), }, )?; dict.set_item("pad_to_multiple_of", params.pad_to_multiple_of)?; dict.set_item("pad_id", params.pad_id)?; dict.set_item("pad_token", &params.pad_token)?; dict.set_item("pad_type_id", params.pad_type_id)?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Encode the given sequence and pair. This method can process raw text sequences /// as well as already pre-tokenized sequences. /// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode("A single sequence") /// encode("A sequence", "And its pair") /// encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True) /// encode( /// [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], /// is_pretokenized=True /// ) /// /// Args: /// sequence (:obj:`~tokenizers.InputSequence`): /// The main input sequence we want to encode. This sequence can be either raw /// text or pre-tokenized, according to the ``is_pretokenized`` argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` /// /// pair (:obj:`~tokenizers.InputSequence`, `optional`): /// An optional input sequence. The expected format is the same as for ``sequence``. /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The encoded result /// #[pyo3(signature = (sequence, pair = None, is_pretokenized = false, add_special_tokens = true))] #[pyo3( text_signature = "(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True)" )] fn encode( &self, sequence: &PyAny, pair: Option<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let sequence: tk::InputSequence = if is_pretokenized { sequence.extract::<PreTokenizedInputSequence>()?.into() } else { sequence.extract::<TextInputSequence>()?.into() }; let input = match pair { Some(pair) => { let pair: tk::InputSequence = if is_pretokenized { pair.extract::<PreTokenizedInputSequence>()?.into() } else { pair.extract::<TextInputSequence>()?.into() }; tk::EncodeInput::Dual(sequence, pair) } None => tk::EncodeInput::Single(sequence), }; ToPyResult( self.tokenizer .encode_char_offsets(input, add_special_tokens) .map(|e| e.into()), ) .into() } /// Encode the given batch of inputs. This method accepts raw text sequences /// as well as already pre-tokenized sequences.
/// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode_batch([ /// "A single sequence", /// ("A tuple with a sequence", "And its pair"), /// [ "A", "pre", "tokenized", "sequence" ], /// ([ "A", "pre", "tokenized", "sequence" ], "And its pair") /// ]) /// /// Args: /// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): /// A list of single sequences or pair sequences to encode. Each sequence /// can be either raw text or pre-tokenized, according to the ``is_pretokenized`` /// argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch /// #[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))] #[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")] fn encode_batch( &self, py: Python<'_>, input: Vec<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<Vec<PyEncoding>> { let input: Vec<tk::EncodeInput> = input .into_iter() .map(|o| { let input: tk::EncodeInput = if is_pretokenized { o.extract::<PreTokenizedEncodeInput>()?.into() } else { o.extract::<TextEncodeInput>()?.into() }; Ok(input) }) .collect::<PyResult<Vec<tk::EncodeInput>>>()?; py.allow_threads(|| { ToPyResult( self.tokenizer .encode_batch_char_offsets(input, add_special_tokens) .map(|encodings| encodings.into_iter().map(|e| e.into()).collect()), ) .into() }) } /// Decode the given list of ids back to a string /// /// This is used to decode anything coming back from a Language Model /// /// Args: /// ids (A :obj:`List/Tuple` of :obj:`int`): /// The list of ids that we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded string /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(signature = (ids, skip_special_tokens = true))] #[pyo3(text_signature = "(self, ids, skip_special_tokens=True)")] fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> PyResult<String> { ToPyResult(self.tokenizer.decode(&ids, skip_special_tokens)).into() } /// Decode a batch of ids back to their corresponding string /// /// Args: /// sequences (:obj:`List` of :obj:`List[int]`): /// The batch of sequences we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded strings /// /// Returns: /// :obj:`List[str]`: A list of decoded strings #[pyo3(signature = (sequences, skip_special_tokens = true))] #[pyo3(text_signature = "(self, sequences, skip_special_tokens=True)")] fn decode_batch( &self, py: Python<'_>, sequences: Vec<Vec<u32>>, skip_special_tokens: bool, ) -> PyResult<Vec<String>> { py.allow_threads(|| { let slices = sequences.iter().map(|v| &v[..]).collect::<Vec<&[u32]>>(); ToPyResult(self.tokenizer.decode_batch(&slices, skip_special_tokens)).into() }) } /// Convert the given token to its corresponding id if it exists /// /// Args: /// token (:obj:`str`): /// The token to convert /// /// Returns: /// :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, token)")] fn 
token_to_id(&self, token: &str) -> Option<u32> { self.tokenizer.token_to_id(token) } /// Convert the given id to its corresponding token if it exists /// /// Args: /// id (:obj:`int`): /// The id to convert /// /// Returns: /// :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.tokenizer.id_to_token(id) } /// Add the given tokens to the vocabulary /// /// The given tokens are added only if they don't already exist in the vocabulary. /// Each token then gets a newly attributed id. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of tokens we want to add to the vocabulary. Each token can be either a /// string or an instance of :class:`~tokenizers.AddedToken` for more customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(false)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = false; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_tokens(&tokens)) } /// Add the given special tokens to the Tokenizer. /// /// If these tokens are already part of the vocabulary, it just lets the Tokenizer know about /// them. If they don't exist, the Tokenizer creates them, giving them a new id. /// /// These special tokens will never be processed by the model (i.e. they won't be split into /// multiple tokens), and they can be removed from the output when decoding. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of special tokens we want to add to the vocabulary. Each token can either /// be a string or an instance of :class:`~tokenizers.AddedToken` for more /// customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_special_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_special_tokens(&tokens)) } /// Train the Tokenizer using the given files. /// /// Reads the files line by line, while keeping all the whitespace, even new lines.
/// If you want to train from data stored in memory, you can check /// :meth:`~tokenizers.Tokenizer.train_from_iterator` /// /// Args: /// files (:obj:`List[str]`): /// A list of paths to the files that we should use for training /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model #[pyo3(signature = (files, trainer = None))] #[pyo3(text_signature = "(self, files, trainer=None)")] fn train(&mut self, files: Vec<String>, trainer: Option<&mut PyTrainer>) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); Python::with_gil(|py| { py.allow_threads(|| { ToPyResult( self.tokenizer .train_from_files(&mut trainer, files) .map(|_| {}), ) .into() }) }) } /// Train the Tokenizer using the provided iterator. /// /// You can provide anything that is a Python Iterator /// /// * A list of sequences :obj:`List[str]` /// * A generator that yields :obj:`str` or :obj:`List[str]` /// * A Numpy array of strings /// * ... /// /// Args: /// iterator (:obj:`Iterator`): /// Any iterator over strings or list of strings /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model /// /// length (:obj:`int`, `optional`): /// The total number of sequences in the iterator. This is used to /// provide meaningful progress tracking #[pyo3(signature = (iterator, trainer = None, length = None))] #[pyo3(text_signature = "(self, iterator, trainer=None, length=None)")] fn train_from_iterator( &mut self, py: Python, iterator: &PyAny, trainer: Option<&mut PyTrainer>, length: Option<usize>, ) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); let buffered_iter = PyBufferedIterator::new( iterator, |element| { // Each element of the iterator can either be: // - An iterator, to allow batching // - A string if let Ok(s) = element.downcast::<PyString>() { itertools::Either::Right(std::iter::once(s.to_str().map(|s| s.to_owned()))) } else { match element.iter() { Ok(iter) => itertools::Either::Left( iter.map(|i| i?.extract::<String>()) .collect::<Vec<_>>() .into_iter(), ), Err(e) => itertools::Either::Right(std::iter::once(Err(e))), } } }, 256, )?; py.allow_threads(|| { ResultShunt::process(buffered_iter, |iter| { self.tokenizer .train(&mut trainer, MaybeSizedIterator::new(iter, length)) .map(|_| {}) .map_err(|e| exceptions::PyException::new_err(e.to_string())) })? }) } /// Apply all the post-processing steps to the given encodings. /// /// The various steps are: /// /// 1. Truncate according to the set truncation params (provided with /// :meth:`~tokenizers.Tokenizer.enable_truncation`) /// 2. Apply the :class:`~tokenizers.processors.PostProcessor` /// 3. Pad according to the set padding params (provided with /// :meth:`~tokenizers.Tokenizer.enable_padding`) /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The :class:`~tokenizers.Encoding` corresponding to the main sequence. /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
/// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The final post-processed encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn post_process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { ToPyResult( self.tokenizer .post_process( encoding.encoding.clone(), pair.map(|p| p.encoding.clone()), add_special_tokens, ) .map(|e| e.into()), ) .into() } /// The :class:`~tokenizers.models.Model` in use by the Tokenizer #[getter] fn get_model(&self, py: Python<'_>) -> PyResult<PyObject> { self.tokenizer.get_model().get_as_subtype(py) } /// Set the :class:`~tokenizers.models.Model` #[setter] fn set_model(&mut self, model: PyRef<PyModel>) { self.tokenizer.with_model(model.clone()); } /// The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer #[getter] fn get_normalizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_normalizer() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_normalizer(&mut self, normalizer: PyRef<PyNormalizer>) { self.tokenizer.with_normalizer(normalizer.clone()); } /// The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer #[getter] fn get_pre_tokenizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(pt) = self.tokenizer.get_pre_tokenizer() { pt.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.pre_tokenizers.PreTokenizer` #[setter] fn set_pre_tokenizer(&mut self, pretok: PyRef<PyPreTokenizer>) { self.tokenizer.with_pre_tokenizer(pretok.clone()); } /// The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer #[getter] fn get_post_processor(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_post_processor() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.processors.PostProcessor` #[setter] fn set_post_processor(&mut self, processor: PyRef<PyPostProcessor>) { self.tokenizer.with_post_processor(processor.clone()); } /// The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer #[getter] fn get_decoder(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(dec) = self.tokenizer.get_decoder() { dec.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.decoders.Decoder` #[setter] fn set_decoder(&mut self, decoder: PyRef<PyDecoder>) { self.tokenizer.with_decoder(decoder.clone()); } } #[cfg(test)] mod test { use super::*; use crate::models::PyModel; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper}; use std::sync::{Arc, RwLock}; use tempfile::NamedTempFile; use tk::normalizers::{Lowercase, NFKC}; #[test] fn serialize() { let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default())); tokenizer.with_normalizer(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(vec![ Arc::new(RwLock::new(NFKC.into())), Arc::new(RwLock::new(Lowercase.into())), ]))); let tmp = NamedTempFile::new().unwrap().into_temp_path(); tokenizer.save(&tmp, false).unwrap(); Tokenizer::from_file(&tmp).unwrap(); } }
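// A minimal sketch of what the `enable_truncation` / `enable_padding` bindings
// above configure on the wrapped tokenizer, from the Rust side (assumed
// context: a mutable `tk::Tokenizer` named `tokenizer`):
//
//     let params = TruncationParams {
//         max_length: 512,
//         strategy: TruncationStrategy::LongestFirst,
//         ..Default::default()
//     };
//     tokenizer.with_truncation(Some(params))?;                // enable_truncation(...)
//     tokenizer.with_padding(Some(PaddingParams::default()));  // enable_padding()
//     tokenizer.with_truncation(None)?;                        // no_truncation()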
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/error.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::type_object::PyTypeInfo; use std::fmt::{Display, Formatter, Result as FmtResult}; use tokenizers::tokenizer::Result; #[derive(Debug)] pub struct PyError(pub String); impl PyError { #[allow(dead_code)] pub fn from(s: &str) -> Self { PyError(String::from(s)) } pub fn into_pyerr<T: PyTypeInfo>(self) -> PyErr { PyErr::new::<T, _>(format!("{}", self)) } } impl Display for PyError { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "{}", self.0) } } impl std::error::Error for PyError {} pub struct ToPyResult<T>(pub Result<T>); impl<T> From<ToPyResult<T>> for PyResult<T> { fn from(v: ToPyResult<T>) -> Self { v.0.map_err(|e| exceptions::PyException::new_err(format!("{}", e))) } } impl<T> ToPyResult<T> { pub fn into_py(self) -> PyResult<T> { self.into() } } pub(crate) fn deprecation_warning(py: Python<'_>, version: &str, message: &str) -> PyResult<()> { let deprecation_warning = py.import("builtins")?.getattr("DeprecationWarning")?; let full_message = format!("Deprecated in {}: {}", version, message); pyo3::PyErr::warn(py, deprecation_warning, &full_message, 0) }
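// A minimal sketch of how these helpers are consumed (the functions below are
// hypothetical; the real call sites live in the binding modules such as
// src/tokenizer.rs):
//
//     fn decode_ids(tokenizer: &tokenizers::Tokenizer, ids: &[u32]) -> PyResult<String> {
//         // Any `tokenizers` Result becomes a generic Python exception:
//         ToPyResult(tokenizer.decode(ids, true)).into()
//     }
//
//     fn bad_value() -> PyErr {
//         // String-based errors can pick a concrete Python exception type:
//         PyError::from("unknown value").into_pyerr::<exceptions::PyValueError>()
//     }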
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/normalizers.rs
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use crate::error::ToPyResult; use crate::utils::{PyNormalizedString, PyNormalizedStringRefMut, PyPattern}; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizers::{ BertNormalizer, Lowercase, Nmt, NormalizerWrapper, Precompiled, Prepend, Replace, Strip, StripAccents, NFC, NFD, NFKC, NFKD, }; use tk::{NormalizedString, Normalizer}; use tokenizers as tk; /// Represents the different kinds of NormalizedString we can receive from Python: /// - Owned: Created in Python and owned by Python /// - RefMut: A mutable reference to a NormalizedString owned by Rust #[derive(FromPyObject)] enum PyNormalizedStringMut<'p> { Owned(PyRefMut<'p, PyNormalizedString>), RefMut(PyNormalizedStringRefMut), } impl PyNormalizedStringMut<'_> { /// Normalizes the underlying `NormalizedString` using the provided normalizer pub fn normalize_with<N>(&mut self, normalizer: &N) -> PyResult<()> where N: Normalizer, { match self { PyNormalizedStringMut::Owned(ref mut n) => normalizer.normalize(&mut n.normalized), PyNormalizedStringMut::RefMut(n) => n.map_as_mut(|n| normalizer.normalize(n))?, } .map_err(|e| exceptions::PyException::new_err(format!("{}", e))) } } /// Base class for all normalizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// Normalizer will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.normalizers", name = "Normalizer", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyNormalizer { #[serde(flatten)] pub(crate) normalizer: PyNormalizerTypeWrapper, } impl PyNormalizer { pub(crate) fn new(normalizer: PyNormalizerTypeWrapper) -> Self { PyNormalizer { normalizer } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.normalizer { PyNormalizerTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py), PyNormalizerTypeWrapper::Single(ref inner) => match &*inner.as_ref().read().unwrap() { PyNormalizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyNormalizerWrapper::Wrapped(ref inner) => match inner { NormalizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } NormalizerWrapper::BertNormalizer(_) => { Py::new(py, (PyBertNormalizer {}, base))?.into_py(py) } NormalizerWrapper::StripNormalizer(_) => { Py::new(py, (PyStrip {}, base))?.into_py(py) } NormalizerWrapper::Prepend(_) => Py::new(py, (PyPrepend {}, base))?.into_py(py), NormalizerWrapper::StripAccents(_) => { Py::new(py, (PyStripAccents {}, base))?.into_py(py) } NormalizerWrapper::NFC(_) => Py::new(py, (PyNFC {}, base))?.into_py(py), NormalizerWrapper::NFD(_) => Py::new(py, (PyNFD {}, base))?.into_py(py), NormalizerWrapper::NFKC(_) => Py::new(py, (PyNFKC {}, base))?.into_py(py), NormalizerWrapper::NFKD(_) => Py::new(py, (PyNFKD {}, base))?.into_py(py), NormalizerWrapper::Lowercase(_) => { Py::new(py, (PyLowercase {}, base))?.into_py(py) } NormalizerWrapper::Precompiled(_) => { Py::new(py, (PyPrecompiled {}, base))?.into_py(py) } NormalizerWrapper::Replace(_) => Py::new(py, (PyReplace {}, base))?.into_py(py), NormalizerWrapper::Nmt(_) => Py::new(py, (PyNmt {}, base))?.into_py(py), }, }, }) } } impl Normalizer for PyNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { self.normalizer.normalize(normalized) } } #[pymethods] impl PyNormalizer {
#[staticmethod] fn custom(obj: PyObject) -> Self { Self { normalizer: PyNormalizerWrapper::Custom(CustomNormalizer::new(obj)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.normalizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Normalizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.normalizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Normalizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Normalize a :class:`~tokenizers.NormalizedString` in-place /// /// This method allows you to modify a :class:`~tokenizers.NormalizedString` while /// keeping track of the alignment information. If you just want to see the result /// of the normalization on a raw string, you can use /// :meth:`~tokenizers.normalizers.Normalizer.normalize_str` /// /// Args: /// normalized (:class:`~tokenizers.NormalizedString`): /// The normalized string on which to apply this /// :class:`~tokenizers.normalizers.Normalizer` #[pyo3(text_signature = "(self, normalized)")] fn normalize(&self, mut normalized: PyNormalizedStringMut) -> PyResult<()> { normalized.normalize_with(&self.normalizer) } /// Normalize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment /// information. If you need to get/convert offsets, you can use /// :meth:`~tokenizers.normalizers.Normalizer.normalize` /// /// Args: /// sequence (:obj:`str`): /// A string to normalize /// /// Returns: /// :obj:`str`: A string after normalization #[pyo3(text_signature = "(self, sequence)")] fn normalize_str(&self, sequence: &str) -> PyResult<String> { let mut normalized = NormalizedString::from(sequence); ToPyResult(self.normalizer.normalize(&mut normalized)).into_py()?; Ok(normalized.get().to_owned()) } } macro_rules! getter { ($self: ident, $variant: ident, $name: ident) => {{ let super_ = $self.as_ref(); if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer { let wrapper = norm.read().unwrap(); if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(o)) = (*wrapper).clone() { o.$name } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer { let mut wrapper = norm.write().unwrap(); if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(ref mut o)) = *wrapper { o.$name = $value; } } }}; } /// BertNormalizer /// /// Takes care of normalizing raw text before giving it to a Bert model. /// This includes cleaning the text, handling accents, Chinese characters and lowercasing /// /// Args: /// clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to clean the text, by removing any control characters /// and replacing all whitespaces by the classic one. /// /// handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to handle Chinese characters by putting spaces around them. /// /// strip_accents (:obj:`bool`, `optional`): /// Whether to strip all accents.
If this option is not specified (i.e. :obj:`None`), /// then it will be determined by the value for `lowercase` (as in the original Bert). /// /// lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to lowercase. #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "BertNormalizer")] pub struct PyBertNormalizer {} #[pymethods] impl PyBertNormalizer { #[getter] fn get_clean_text(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, clean_text) } #[setter] fn set_clean_text(self_: PyRef<Self>, clean_text: bool) { setter!(self_, BertNormalizer, clean_text, clean_text); } #[getter] fn get_handle_chinese_chars(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, handle_chinese_chars) } #[setter] fn set_handle_chinese_chars(self_: PyRef<Self>, handle_chinese_chars: bool) { setter!( self_, BertNormalizer, handle_chinese_chars, handle_chinese_chars ); } #[getter] fn get_strip_accents(self_: PyRef<Self>) -> Option<bool> { getter!(self_, BertNormalizer, strip_accents) } #[setter] fn set_strip_accents(self_: PyRef<Self>, strip_accents: Option<bool>) { setter!(self_, BertNormalizer, strip_accents, strip_accents); } #[getter] fn get_lowercase(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, lowercase) } #[setter] fn set_lowercase(self_: PyRef<Self>, lowercase: bool) { setter!(self_, BertNormalizer, lowercase, lowercase) } #[new] #[pyo3(signature = ( clean_text = true, handle_chinese_chars = true, strip_accents = None, lowercase = true ), text_signature = "(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True)")] fn new( clean_text: bool, handle_chinese_chars: bool, strip_accents: Option<bool>, lowercase: bool, ) -> (Self, PyNormalizer) { let normalizer = BertNormalizer::new(clean_text, handle_chinese_chars, strip_accents, lowercase); (PyBertNormalizer {}, normalizer.into()) } } /// NFD Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFD")] pub struct PyNFD {} #[pymethods] impl PyNFD { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFD {}, PyNormalizer::new(NFD.into())) } } /// NFKD Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKD")] pub struct PyNFKD {} #[pymethods] impl PyNFKD { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFKD {}, NFKD.into()) } } /// NFC Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFC")] pub struct PyNFC {} #[pymethods] impl PyNFC { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFC {}, NFC.into()) } } /// NFKC Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKC")] pub struct PyNFKC {} #[pymethods] impl PyNFKC { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFKC {}, NFKC.into()) } } /// Allows concatenating multiple other Normalizers as a Sequence.
/// All the normalizers run in sequence in the given order /// /// Args: /// normalizers (:obj:`List[Normalizer]`): /// A list of Normalizers to be run as a sequence #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = None)] fn new(normalizers: &PyList) -> PyResult<(Self, PyNormalizer)> { let mut sequence = Vec::with_capacity(normalizers.len()); for n in normalizers.iter() { let normalizer: PyRef<PyNormalizer> = n.extract()?; match &normalizer.normalizer { PyNormalizerTypeWrapper::Sequence(inner) => sequence.extend(inner.iter().cloned()), PyNormalizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } fn __len__(&self) -> usize { 0 } } /// Lowercase Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Lowercase")] pub struct PyLowercase {} #[pymethods] impl PyLowercase { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyLowercase {}, Lowercase.into()) } } /// Strip normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Strip")] pub struct PyStrip {} #[pymethods] impl PyStrip { #[getter] fn get_left(self_: PyRef<Self>) -> bool { getter!(self_, StripNormalizer, strip_left) } #[setter] fn set_left(self_: PyRef<Self>, left: bool) { setter!(self_, StripNormalizer, strip_left, left) } #[getter] fn get_right(self_: PyRef<Self>) -> bool { getter!(self_, StripNormalizer, strip_right) } #[setter] fn set_right(self_: PyRef<Self>, right: bool) { setter!(self_, StripNormalizer, strip_right, right) } #[new] #[pyo3(signature = (left = true, right = true), text_signature = "(self, left=True, right=True)")] fn new(left: bool, right: bool) -> (Self, PyNormalizer) { (PyStrip {}, Strip::new(left, right).into()) } } /// Prepend normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Prepend")] pub struct PyPrepend {} #[pymethods] impl PyPrepend { #[getter] fn get_prepend(self_: PyRef<Self>) -> String { getter!(self_, Prepend, prepend) } #[setter] fn set_prepend(self_: PyRef<Self>, prepend: String) { setter!(self_, Prepend, prepend, prepend) } #[new] #[pyo3(signature = (prepend="▁".to_string()), text_signature = "(self, prepend)")] fn new(prepend: String) -> (Self, PyNormalizer) { (PyPrepend {}, Prepend::new(prepend).into()) } } /// StripAccents normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "StripAccents")] pub struct PyStripAccents {} #[pymethods] impl PyStripAccents { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyStripAccents {}, StripAccents.into()) } } /// Nmt normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Nmt")] pub struct PyNmt {} #[pymethods] impl PyNmt { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNmt {}, Nmt.into()) } } /// Precompiled normalizer /// Don't use it manually; it is only there for compatibility with SentencePiece.
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Precompiled")] pub struct PyPrecompiled {} #[pymethods] impl PyPrecompiled { #[new] #[pyo3(text_signature = "(self, precompiled_charsmap)")] fn new(py_precompiled_charsmap: &PyBytes) -> PyResult<(Self, PyNormalizer)> { let precompiled_charsmap: &[u8] = FromPyObject::extract(py_precompiled_charsmap)?; Ok(( PyPrecompiled {}, Precompiled::from(precompiled_charsmap) .map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to build Precompiled normalizer: {}", e )) })? .into(), )) } } /// Replace normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Replace")] pub struct PyReplace {} #[pymethods] impl PyReplace { #[new] #[pyo3(text_signature = "(self, pattern, content)")] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyNormalizer)> { Ok(( PyReplace {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } #[derive(Debug, Clone)] pub(crate) struct CustomNormalizer { inner: PyObject, } impl CustomNormalizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::Normalizer for CustomNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { Python::with_gil(|py| { let normalized = PyNormalizedStringRefMut::new(normalized); let py_normalized = self.inner.as_ref(py); py_normalized.call_method("normalize", (normalized.get(),), None)?; Ok(()) }) } } impl Serialize for CustomNormalizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom Normalizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomNormalizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom Normalizer cannot be deserialized", )) } } #[derive(Debug, Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyNormalizerWrapper { Custom(CustomNormalizer), Wrapped(NormalizerWrapper), } impl Serialize for PyNormalizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyNormalizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyNormalizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Debug, Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyNormalizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyNormalizerWrapper>>>), Single(Arc<RwLock<PyNormalizerWrapper>>), } impl Serialize for PyNormalizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyNormalizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("normalizers", seq)?; ser.end() } PyNormalizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyNormalizerWrapper where I: Into<NormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizerWrapper::Wrapped(norm.into()) } } impl<I> From<I> for PyNormalizerTypeWrapper where I: Into<PyNormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizerTypeWrapper::Single(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyNormalizer where I: Into<NormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizer { normalizer: norm.into().into(), } } } impl Normalizer for PyNormalizerTypeWrapper { fn normalize(&self, normalized: &mut 
NormalizedString) -> tk::Result<()> { match self { PyNormalizerTypeWrapper::Single(inner) => inner.read().unwrap().normalize(normalized), PyNormalizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().normalize(normalized)), } } } impl Normalizer for PyNormalizerWrapper { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { match self { PyNormalizerWrapper::Wrapped(inner) => inner.normalize(normalized), PyNormalizerWrapper::Custom(inner) => inner.normalize(normalized), } } } /// Normalizers Module #[pymodule] pub fn normalizers(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyNormalizer>()?; m.add_class::<PyBertNormalizer>()?; m.add_class::<PyNFD>()?; m.add_class::<PyNFKD>()?; m.add_class::<PyNFC>()?; m.add_class::<PyNFKC>()?; m.add_class::<PySequence>()?; m.add_class::<PyLowercase>()?; m.add_class::<PyStrip>()?; m.add_class::<PyStripAccents>()?; m.add_class::<PyPrepend>()?; m.add_class::<PyNmt>()?; m.add_class::<PyPrecompiled>()?; m.add_class::<PyReplace>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::normalizers::unicode::{NFC, NFKC}; use tk::normalizers::utils::Sequence; use tk::normalizers::NormalizerWrapper; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper, PyNormalizerWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyNormalizer::new(NFC.into()); let py_nfc = py_norm.get_as_subtype(py).unwrap(); assert_eq!("NFC", py_nfc.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyNormalizerWrapper = NFKC.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = NormalizerWrapper::NFKC(NFKC); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_norm: PyNormalizer = serde_json::from_str(&rs_ser).unwrap(); match py_norm.normalizer { PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {} _ => panic!("Expected NFKC"), }, _ => panic!("Expected wrapped, not sequence."), } let py_seq: PyNormalizerWrapper = Sequence::new(vec![NFC.into(), NFKC.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = NormalizerWrapper::Sequence(Sequence::new(vec![NFC.into(), NFKC.into()])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyNormalizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let rs_seq = Sequence::new(vec![NFC.into(), NFKC.into()]); let rs_ser = serde_json::to_string(&rs_seq).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); } #[test] fn deserialize_sequence() { let string = r#"{"type": "NFKC"}"#; let normalizer: PyNormalizer = serde_json::from_str(string).unwrap(); match normalizer.normalizer { PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {} _ => panic!("Expected NFKC"), }, _ => panic!("Expected wrapped, not sequence."), } let sequence_string = format!(r#"{{"type": "Sequence", "normalizers": [{}]}}"#, string); let normalizer: PyNormalizer = serde_json::from_str(&sequence_string).unwrap(); match normalizer.normalizer { PyNormalizerTypeWrapper::Single(inner) => match &*inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::Sequence(sequence)) => { let normalizers = sequence.get_normalizers(); 
assert_eq!(normalizers.len(), 1); match normalizers[0] { NormalizerWrapper::NFKC(_) => {} _ => panic!("Expected NFKC"), } } _ => panic!("Expected sequence"), }, _ => panic!("Expected single"), }; } }
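// A minimal sketch of driving these wrappers from Rust (assumed context; it
// mirrors the `serialize` test in src/tokenizer.rs):
//
//     use tk::normalizers::{Lowercase, NFKC};
//
//     let norm = PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(vec![
//         Arc::new(RwLock::new(NFKC.into())),
//         Arc::new(RwLock::new(Lowercase.into())),
//     ]));
//     let mut s = NormalizedString::from("Héllo");
//     norm.normalize(&mut s).unwrap();
//     assert_eq!(s.get(), "héllo");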
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/pretokenization.rs
use tokenizers as tk; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use super::{ DestroyPtr, PyNormalizedString, PyNormalizedStringRefMut, RefMutContainer, RefMutGuard, }; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use crate::token::PyToken; use tk::{OffsetReferential, OffsetType, Offsets, PreTokenizedString, Token}; fn split(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`split` expects a callable with the signature: \ `fn(index: int, normalized: NormalizedString) -> List[NormalizedString]`", )) } else { ToPyResult(pretok.split(|i, normalized| { let output = func.call((i, PyNormalizedString::from(normalized)), None)?; Ok(output .extract::<Vec<PyNormalizedString>>()? .into_iter() .map(tk::NormalizedString::from)) })) .into() } } fn normalize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`normalize` expects a callable with the signature: \ `fn(normalized: NormalizedString)`", )) } else { ToPyResult(pretok.normalize(|normalized| { let norm = PyNormalizedStringRefMut::new(normalized); func.call((norm.get(),), None)?; Ok(()) })) .into() } } fn tokenize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`tokenize` expects a callable with the signature: \ `fn(str) -> List[Token]`", )) } else { ToPyResult(pretok.tokenize(|normalized| { let output = func.call((normalized.get(),), None)?; Ok(output .extract::<&PyList>()? .into_iter() .map(|obj| Ok(Token::from(obj.extract::<PyToken>()?))) .collect::<PyResult<Vec<_>>>()?) })) .into() } } /// Wrapper around `OffsetReferential`, parsed from a Python string /// (`"original"` or `"normalized"`) #[derive(Clone)] pub struct PyOffsetReferential(OffsetReferential); impl FromPyObject<'_> for PyOffsetReferential { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "original" => Ok(OffsetReferential::Original), "normalized" => Ok(OffsetReferential::Normalized), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetReferential, expected one of `original, normalized`", )), }?)) } } #[derive(Clone)] pub struct PyOffsetType(OffsetType); impl FromPyObject<'_> for PyOffsetType { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "byte" => Ok(OffsetType::Byte), "char" => Ok(OffsetType::Char), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetType, expected one of `byte, char`", )), }?)) } } type PySplit = (String, Offsets, Option<Vec<PyToken>>); fn get_splits( pretok: &PreTokenizedString, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { pretok .get_splits(offset_referential.0, offset_type.0) .into_iter() .map(|(s, o, t)| { ( s.to_owned(), o, t.as_ref() .map(|tokens| tokens.iter().map(|t| t.clone().into()).collect()), ) }) .collect() } fn to_encoding( pretok: &PreTokenizedString, type_id: u32, word_idx: Option<u32>, ) -> PyResult<PyEncoding> { Ok(ToPyResult( pretok .clone() .into_encoding(word_idx, type_id, tk::OffsetType::Char), ) .into_py()? .into()) } /// PreTokenizedString /// /// Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the /// underlying string, while keeping track of the alignment information (offsets). /// /// The PreTokenizedString manages what we call `splits`. Each split represents a substring /// which is a subpart of the original string, with the relevant offsets and tokens.
/// /// When calling one of the methods used to modify the PreTokenizedString (namely one of /// `split`, `normalize` or `tokenize`), only the `splits` that don't have any associated /// tokens will get modified. /// /// Args: /// sequence: str: /// The string sequence used to initialize this PreTokenizedString #[pyclass(module = "tokenizers", name = "PreTokenizedString")] pub struct PyPreTokenizedString { pub(crate) pretok: tk::PreTokenizedString, } impl From<PreTokenizedString> for PyPreTokenizedString { fn from(pretok: PreTokenizedString) -> Self { Self { pretok } } } impl From<PyPreTokenizedString> for PreTokenizedString { fn from(pretok: PyPreTokenizedString) -> Self { pretok.pretok } } #[pymethods] impl PyPreTokenizedString { #[new] #[pyo3(text_signature = "(self, sequence)")] fn new(s: &str) -> Self { PreTokenizedString::from(s).into() } /// Split the PreTokenizedString using the given `func` /// /// Args: /// func: Callable[[index, NormalizedString], List[NormalizedString]]: /// The function used to split each underlying split. /// It is expected to return a list of `NormalizedString`, that represent the new /// splits. If the given `NormalizedString` does not need any splitting, we can /// just return it directly. /// In order for the offsets to be tracked accurately, any returned `NormalizedString` /// should come from calling either `.split` or `.slice` on the received one. #[pyo3(text_signature = "(self, func)")] fn split(&mut self, func: &PyAny) -> PyResult<()> { split(&mut self.pretok, func) } /// Normalize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[NormalizedString], None]: /// The function used to normalize each underlying split. This function /// does not need to return anything, just calling the methods on the provided /// NormalizedString allows its modification. #[pyo3(text_signature = "(self, func)")] fn normalize(&mut self, func: &PyAny) -> PyResult<()> { normalize(&mut self.pretok, func) } /// Tokenize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[str], List[Token]]: /// The function used to tokenize each underlying split. This function must return /// a list of Token generated from the input str. #[pyo3(text_signature = "(self, func)")] fn tokenize(&mut self, func: &PyAny) -> PyResult<()> { tokenize(&mut self.pretok, func) } /// Return an Encoding generated from this PreTokenizedString /// /// Args: /// type_id: int = 0: /// The type_id to be used on the generated Encoding. /// /// word_idx: Optional[int] = None: /// An optional word index to be used for each token of this Encoding. If provided, /// all the word indices in the generated Encoding will use this value, instead /// of the one automatically tracked during pre-tokenization. /// /// Returns: /// An Encoding #[pyo3(signature = (type_id = 0, word_idx = None))] #[pyo3(text_signature = "(self, type_id=0, word_idx=None)")] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { to_encoding(&self.pretok, type_id, word_idx) } /// Get the splits currently managed by the PreTokenizedString /// /// Args: /// offset_referential: :obj:`str` /// Whether the returned splits should have offsets expressed relative /// to the original string, or the normalized one. choices: "original", "normalized". /// /// offset_type: :obj:`str` /// Whether the returned splits should have offsets expressed in bytes or chars. /// When slicing a str, we usually want to use chars, which is the default value.
/// Now in some cases it might be interesting to get these offsets expressed in bytes, /// so it is possible to change this here. /// choices: "char", "byte" /// /// Returns: /// A list of splits #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] #[pyo3(text_signature = "(self, offset_referential=\"original\", offset_type=\"char\")")] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { get_splits(&self.pretok, offset_referential, offset_type) } } #[pyclass(module = "tokenizers", name = "PreTokenizedString")] #[derive(Clone)] pub struct PyPreTokenizedStringRefMut { inner: RefMutContainer<PreTokenizedString>, } impl DestroyPtr for PyPreTokenizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyPreTokenizedStringRefMut { pub fn new(pretok: &mut tk::PreTokenizedString) -> RefMutGuard<Self> { // SAFETY: This is safe because we return a RefMutGuard here. // The compiler will make sure the &mut stays valid as necessary. RefMutGuard::new(Self { inner: RefMutContainer::new(pretok), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err( "Cannot use a PreTokenizedStringRefMut outside `pre_tokenize`", ) } } #[pymethods] impl PyPreTokenizedStringRefMut { fn split(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| split(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn normalize(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| normalize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn tokenize(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| tokenize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = (type_id = 0, word_idx = None))] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { self.inner .map(|pretok| to_encoding(pretok, type_id, word_idx)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> PyResult<Vec<PySplit>> { self.inner .map(|pretok| get_splits(pretok, offset_referential, offset_type)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error) } }
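// A minimal sketch of the underlying Rust API these bindings wrap (assumed
// context; `SplitDelimiterBehavior` comes from `tk::normalizer`):
//
//     let mut pretok = PreTokenizedString::from("Hello world");
//     pretok
//         .split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))
//         .unwrap();
//     let splits = pretok.get_splits(OffsetReferential::Original, OffsetType::Char);
//     // -> [("Hello", (0, 5), None), ("world", (6, 11), None)] (tokens still unset)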
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/mod.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; mod iterators; mod normalization; mod pretokenization; mod regex; pub use iterators::*; pub use normalization::*; pub use pretokenization::*; pub use regex::*; // PyChar // This type is a temporary hack to accept `char` as argument // To be removed once https://github.com/PyO3/pyo3/pull/1282 has been released pub struct PyChar(pub char); impl FromPyObject<'_> for PyChar { fn extract(obj: &PyAny) -> PyResult<Self> { let s = <PyString as PyTryFrom<'_>>::try_from(obj)?.to_str()?; let mut iter = s.chars(); if let (Some(ch), None) = (iter.next(), iter.next()) { Ok(Self(ch)) } else { Err(exceptions::PyValueError::new_err( "expected a string of length 1", )) } } } // RefMut utils pub trait DestroyPtr { fn destroy(&mut self); } pub struct RefMutGuard<'r, T: DestroyPtr + Clone> { content: T, r: PhantomData<&'r mut T>, } impl<T: DestroyPtr + Clone> RefMutGuard<'_, T> { pub fn new(content: T) -> Self { Self { content, r: PhantomData, } } pub fn get(&self) -> T { self.content.clone() } } impl<T: DestroyPtr + Clone> Drop for RefMutGuard<'_, T> { fn drop(&mut self) { self.content.destroy() } } #[derive(Clone)] pub struct RefMutContainer<T> { inner: Arc<Mutex<Option<*mut T>>>, } impl<T> RefMutContainer<T> { pub fn new(content: &mut T) -> Self { Self { inner: Arc::new(Mutex::new(Some(content))), } } pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_ref().unwrap() })) } pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_mut().unwrap() })) } } impl<T> DestroyPtr for RefMutContainer<T> { fn destroy(&mut self) { self.inner.lock().unwrap().take(); } } unsafe impl<T: Send> Send for RefMutContainer<T> {} unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
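// A minimal sketch of the RefMut machinery above (hypothetical usage; the
// real consumers are the *RefMut wrappers in normalization.rs and
// pretokenization.rs):
//
//     let mut value = String::from("hello");
//     let guard = RefMutGuard::new(RefMutContainer::new(&mut value));
//     let container = guard.get();                     // cheap Arc clone
//     assert_eq!(container.map(|s| s.len()), Some(5));
//     drop(guard);                                     // invalidates the pointer
//     assert_eq!(container.map(|s| s.len()), None);    // safe: access is refused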
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/regex.rs
use onig::Regex; use pyo3::exceptions; use pyo3::prelude::*; /// Instantiate a new Regex with the given pattern #[pyclass(module = "tokenizers", name = "Regex")] pub struct PyRegex { pub inner: Regex, pub pattern: String, } #[pymethods] impl PyRegex { #[new] #[pyo3(text_signature = "(self, pattern)")] fn new(s: &str) -> PyResult<Self> { Ok(Self { inner: Regex::new(s) .map_err(|e| exceptions::PyException::new_err(e.description().to_owned()))?, pattern: s.to_owned(), }) } }
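// A minimal sketch (assumed usage): both the compiled `onig::Regex` and the
// source pattern are kept, so other bindings can match with it or rebuild it:
//
//     let re = PyRegex::new(r"\s+")?;
//     assert!(re.inner.find("a  b").is_some());
//     assert_eq!(re.pattern, r"\s+");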
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/iterators.rs
use pyo3::prelude::*; use std::collections::VecDeque; /// A simple iterator that can be instantiated with a specified length. /// We use this with iterators that don't have a size_hint but whose size /// we might know. This is useful with progress bars for example. pub struct MaybeSizedIterator<I> { length: Option<usize>, iter: I, } impl<I> MaybeSizedIterator<I> where I: Iterator, { pub fn new(iter: I, length: Option<usize>) -> Self { Self { length, iter } } } impl<I> Iterator for MaybeSizedIterator<I> where I: Iterator, { type Item = I::Item; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { (self.length.unwrap_or(0), None) } } /// A buffered iterator that takes care of locking the GIL only when needed. /// The `PyIterator` provided by PyO3 keeps a Python GIL token all along /// and thus doesn't allow us to release the GIL to allow having other threads. /// /// This iterator serves two purposes: /// - First, as opposed to the `pyo3::PyIterator`, it is Send and can easily be parallelized /// - Second, this lets us release the GIL between two refills of the buffer, allowing other /// Python threads to work pub struct PyBufferedIterator<T, F> { iter: Option<Py<PyAny>>, converter: F, buffer: VecDeque<PyResult<T>>, size: usize, } impl<T, F, I> PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { /// Create a new PyBufferedIterator using the provided Python object. /// This object must implement the Python Iterator Protocol, and an error will /// be returned if the contract is not respected. /// /// The `converter` provides a way to convert each item in the iterator into /// something that doesn't embed a 'py token and thus allows the GIL to be released /// /// The `buffer_size` represents the number of items that we buffer before we /// need to acquire the GIL again. pub fn new(iter: &PyAny, converter: F, buffer_size: usize) -> PyResult<Self> { let py = iter.py(); let iter: Py<PyAny> = unsafe { py.from_borrowed_ptr_or_err::<PyAny>(pyo3::ffi::PyObject_GetIter(iter.as_ptr()))? .to_object(py) }; Ok(Self { iter: Some(iter), converter, buffer: VecDeque::with_capacity(buffer_size), size: buffer_size, }) } /// Refill the buffer, and set `self.iter` as `None` if nothing more to get fn refill(&mut self) -> PyResult<()> { if self.iter.is_none() { return Ok(()); } Python::with_gil(|py| loop { if self.buffer.len() >= self.size { return Ok(()); } match unsafe { py.from_owned_ptr_or_opt::<PyAny>(pyo3::ffi::PyIter_Next( self.iter.as_ref().unwrap().as_ref(py).as_ptr(), )) } { Some(obj) => self.buffer.extend((self.converter)(obj)), None => { if PyErr::occurred(py) { return Err(PyErr::fetch(py)); } else { self.iter = None; } } }; if self.iter.is_none() { return Ok(()); } }) } } impl<T, F, I> Iterator for PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { type Item = PyResult<T>; fn next(&mut self) -> Option<Self::Item> { if !self.buffer.is_empty() { self.buffer.pop_front() } else if self.iter.is_some() { if let Err(e) = self.refill() { return Some(Err(e)); } self.next() } else { None } } }
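// A minimal sketch of the intended call pattern (hypothetical; it mirrors
// `train_from_iterator` in src/tokenizer.rs, the real consumer):
//
//     let buffered = PyBufferedIterator::new(
//         py_iter,                                         // any Python iterable
//         |element| std::iter::once(element.extract::<String>()),
//         256,                                             // items per GIL refill
//     )?;
//     py.allow_threads(|| {
//         for item in MaybeSizedIterator::new(buffered, length) {
//             let _sequence: String = item?;               // GIL-free consumption
//         }
//         Ok::<_, PyErr>(())
//     })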
0
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/normalization.rs
use super::regex::PyRegex; use super::{DestroyPtr, RefMutContainer, RefMutGuard}; use crate::error::ToPyResult; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::normalizer::{char_to_bytes, NormalizedString, Range, SplitDelimiterBehavior}; use tk::pattern::Pattern; /// Represents a Pattern as used by `NormalizedString` #[derive(Clone, FromPyObject)] pub enum PyPattern<'p> { #[pyo3(annotation = "str")] Str(&'p str), #[pyo3(annotation = "tokenizers.Regex")] Regex(Py<PyRegex>), // TODO: Add the compatibility for Fn(char) -> bool } impl Pattern for PyPattern<'_> { fn find_matches(&self, inside: &str) -> tk::Result<Vec<(tk::Offsets, bool)>> { match self { PyPattern::Str(s) => { let mut chars = s.chars(); if let (Some(c), None) = (chars.next(), chars.next()) { c.find_matches(inside) } else { s.find_matches(inside) } } PyPattern::Regex(r) => { Python::with_gil(|py| (&r.borrow(py).inner).find_matches(inside)) } } } } impl From<PyPattern<'_>> for tk::normalizers::replace::ReplacePattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } impl From<PyPattern<'_>> for tk::pre_tokenizers::split::SplitPattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } #[derive(Debug, Clone, FromPyObject)] pub enum PyRange<'s> { #[pyo3(annotation = "int")] Single(isize), #[pyo3(annotation = "Tuple[uint, uint]")] Range(usize, usize), #[pyo3(annotation = "slice")] Slice(&'s PySlice), } impl PyRange<'_> { pub fn to_range(&self, max_len: usize) -> PyResult<std::ops::Range<usize>> { match self { PyRange::Single(i) => { if i.is_negative() { let i = -i as usize; if i > max_len { Err(exceptions::PyValueError::new_err(format!( "{} is bigger than max len", i ))) } else { Ok(max_len - i..max_len - i + 1) } } else { let i = *i as usize; Ok(i..i + 1) } } PyRange::Range(s, e) => Ok(*s..*e), PyRange::Slice(s) => { let r = s.indices(max_len as std::os::raw::c_long)?; Ok(r.start as usize..r.stop as usize) } } } } #[derive(Clone)] pub struct PySplitDelimiterBehavior(pub SplitDelimiterBehavior); impl FromPyObject<'_> for PySplitDelimiterBehavior { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "removed" => Ok(SplitDelimiterBehavior::Removed), "isolated" => Ok(SplitDelimiterBehavior::Isolated), "merged_with_previous" => Ok(SplitDelimiterBehavior::MergedWithPrevious), "merged_with_next" => Ok(SplitDelimiterBehavior::MergedWithNext), "contiguous" => Ok(SplitDelimiterBehavior::Contiguous), _ => Err(exceptions::PyValueError::new_err( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, merged_with_previous, merged_with_next, contiguous`", )), }?)) } } impl From<PySplitDelimiterBehavior> for SplitDelimiterBehavior { fn from(v: PySplitDelimiterBehavior) -> Self { v.0 } } fn filter(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`filter` expects a callable with the signature: `fn(char) -> bool`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.filter(|c| { func.call1((c.to_string(),)) .expect(err) .extract() .expect(err) }); Ok(()) } } fn for_each(normalized: &NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`for_each` expects a callable with the signature:
`fn(char)`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.for_each(|c| { func.call1((c.to_string(),)).expect(err); }); Ok(()) } } fn map(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`map` expect a callable with the signature: `fn(char) -> char`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.map(|c| { let c: &str = func .call1((c.to_string(),)) .expect(err) .extract() .expect(err); c.chars().next().expect(err) }); Ok(()) } } fn slice( normalized: &NormalizedString, range: &PyRange<'_>, ) -> PyResult<Option<PyNormalizedString>> { let n_char = normalized.len(); let char_range = range.to_range(n_char)?; Ok( char_to_bytes(normalized.get(), char_range).and_then(|bytes_range| { normalized .slice(Range::Normalized(bytes_range)) .map(|n| n.into()) }), ) } /// NormalizedString /// /// A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. /// While making all the requested modifications, it keeps track of the alignment information /// between the two versions of the string. /// /// Args: /// sequence: str: /// The string sequence used to initialize this NormalizedString #[pyclass(module = "tokenizers", name = "NormalizedString")] #[derive(Clone)] pub struct PyNormalizedString { pub(crate) normalized: NormalizedString, } #[pymethods] impl PyNormalizedString { #[new] #[pyo3(text_signature = None)] fn new(s: &str) -> Self { NormalizedString::from(s).into() } /// The normalized part of the string #[getter] fn get_normalized(&self) -> &str { self.normalized.get() } #[getter] fn get_original(&self) -> &str { self.normalized.get_original() } /// Runs the NFD normalization #[pyo3(text_signature = "(self)")] fn nfd(&mut self) { self.normalized.nfd(); } /// Runs the NFKD normalization #[pyo3(text_signature = "(self)")] fn nfkd(&mut self) { self.normalized.nfkd(); } /// Runs the NFC normalization #[pyo3(text_signature = "(self)")] fn nfc(&mut self) { self.normalized.nfc(); } /// Runs the NFKC normalization #[pyo3(text_signature = "(self)")] fn nfkc(&mut self) { self.normalized.nfkc(); } /// Lowercase the string #[pyo3(text_signature = "(self)")] fn lowercase(&mut self) { self.normalized.lowercase(); } /// Uppercase the string #[pyo3(text_signature = "(self)")] fn uppercase(&mut self) { self.normalized.uppercase(); } /// Prepend the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn prepend(&mut self, s: &str) { self.normalized.prepend(s); } /// Append the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn append(&mut self, s: &str) { self.normalized.append(s); } /// Strip the left of the string #[pyo3(text_signature = "(self)")] fn lstrip(&mut self) { self.normalized.lstrip(); } /// Strip the right of the string #[pyo3(text_signature = "(self)")] fn rstrip(&mut self) { self.normalized.rstrip(); } /// Strip both ends of the string #[pyo3(text_signature = "(self)")] fn strip(&mut self) { self.normalized.strip(); } /// Clears the string #[pyo3(text_signature = "(self)")] fn clear(&mut self) { self.normalized.clear(); } /// Slice the string using the given range #[pyo3(text_signature = "(self, range)")] fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } /// Filter each character of the string using the given func #[pyo3(text_signature = "(self, func)")] fn filter(&mut self, func: &PyAny) -> PyResult<()> { filter(&mut self.normalized, func) } /// Calls the given 
function for each character of the string #[pyo3(text_signature = "(self, func)")] fn for_each(&self, func: &PyAny) -> PyResult<()> { for_each(&self.normalized, func) } /// Calls the given function for each character of the string /// /// Replaces each character of the string using the returned value. Each /// returned value **must** be a str of length 1 (ie a character). #[pyo3(text_signature = "(self, func)")] fn map(&mut self, func: &PyAny) -> PyResult<()> { map(&mut self.normalized, func) } /// Split the NormalizedString using the given pattern and the specified behavior /// /// Args: /// pattern: Pattern: /// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` /// /// behavior: SplitDelimiterBehavior: /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// Returns: /// A list of NormalizedString, representing each split #[pyo3(text_signature = "(self, pattern, behavior)")] fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult(self.normalized.split(pattern, behavior.into())) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } /// Replace the content of the given pattern with the provided content /// /// Args: /// pattern: Pattern: /// A pattern used to match the string. Usually a string or a Regex /// /// content: str: /// The content to be used as replacement #[pyo3(text_signature = "(self, pattern, content)")] fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult(self.normalized.replace(pattern, content)).into() } fn __repr__(&self) -> String { format!( r#"NormalizedString(original="{}", normalized="{}")"#, self.normalized.get_original(), self.normalized.get() ) } fn __str__(&self) -> &str { self.normalized.get() } fn __getitem__(&self, range: PyRange<'_>) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } } impl From<NormalizedString> for PyNormalizedString { fn from(normalized: NormalizedString) -> Self { Self { normalized } } } impl From<PyNormalizedString> for NormalizedString { fn from(normalized: PyNormalizedString) -> Self { normalized.normalized } } #[pyclass(module = "tokenizers", name = "NormalizedStringRefMut")] #[derive(Clone)] pub struct PyNormalizedStringRefMut { inner: RefMutContainer<NormalizedString>, } impl DestroyPtr for PyNormalizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyNormalizedStringRefMut { pub fn new(normalized: &mut NormalizedString) -> RefMutGuard<Self> { RefMutGuard::new(Self { inner: RefMutContainer::new(normalized), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err("Cannot use a NormalizedStringRefMut outside `normalize`") } /// Provides a way to access a reference to the underlying NormalizedString pub fn map_as_ref<F: FnOnce(&NormalizedString) -> U, U>(&self, f: F) -> PyResult<U> { self.inner .map(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } /// Provides a way to access a mutable reference to the underlying NormalizedString pub fn map_as_mut<F: FnOnce(&mut NormalizedString) -> U, U>(&mut self, f: F) -> PyResult<U> { self.inner .map_mut(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } } #[pymethods] impl PyNormalizedStringRefMut { #[getter] fn get_normalized(&self) -> PyResult<String> { self.inner .map(|n| n.get().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } #[getter] fn 
get_original(&self) -> PyResult<String> { self.inner .map(|n| n.get_original().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } fn nfd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lowercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lowercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn uppercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.uppercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn prepend(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.prepend(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn append(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.append(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn rstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.rstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn strip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.strip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn clear(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.clear(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { self.inner .map(|n| slice(n, &range)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)? } fn filter(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| filter(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn for_each(&self, func: &PyAny) -> PyResult<()> { self.inner .map(|n| for_each(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn map(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| map(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult( self.inner .map_mut(|n| n.split(pattern, behavior.into())) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult( self.inner .map_mut(|n| n.replace(pattern, content)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into() } }
0
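The trickiest part of the file above is `PyRange::to_range`: a single index always resolves to a one-character range, and a negative index counts from the end of the string, which is what makes Python-style `normalized[-1]` work through `__getitem__`. A minimal TypeScript sketch of the same resolution rule, for illustration only (the `toRange` helper is hypothetical and not part of the bindings):

```ts
// Hypothetical mirror of PyRange::to_range's Single(i) branch (see the Rust
// above): index i maps to the one-character range [i, i + 1), and a negative
// i is counted back from the end of the string.
function toRange(i: number, maxLen: number): [number, number] {
  if (i < 0) {
    const fromEnd = -i;
    // Same guard as the Rust code: reject indices beyond the string length.
    if (fromEnd > maxLen) throw new Error(`${fromEnd} is bigger than max len`);
    return [maxLen - fromEnd, maxLen - fromEnd + 1];
  }
  return [i, i + 1];
}

console.log(toRange(-1, 5)); // [4, 5] -> the last character
console.log(toRange(0, 5)); // [0, 1] -> the first character
```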
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/.eslintrc.yml
parser: '@typescript-eslint/parser'

parserOptions:
  ecmaFeatures:
    jsx: true
  ecmaVersion: latest
  sourceType: module
  project: ./tsconfig.json

env:
  browser: true
  es6: true
  node: true
  jest: true

ignorePatterns: ['index.js', 'target/']

plugins:
  - import
  - '@typescript-eslint'

extends:
  - eslint:recommended
  - plugin:prettier/recommended

rules:
  # 0 = off, 1 = warn, 2 = error
  'space-before-function-paren': 0
  'no-useless-constructor': 0
  'no-undef': 2
  'no-console': [2, { allow: ['error', 'warn', 'info', 'assert'] }]
  'comma-dangle': ['error', 'only-multiline']
  'no-unused-vars': 0
  'no-var': 2
  'one-var-declaration-per-line': 2
  'prefer-const': 2
  'no-const-assign': 2
  'no-duplicate-imports': 2
  'no-use-before-define': [2, { 'functions': false, 'classes': false }]
  'eqeqeq': [2, 'always', { 'null': 'ignore' }]
  'no-case-declarations': 0
  'no-restricted-syntax':
    [
      2,
      {
        'selector': 'BinaryExpression[operator=/(==|===|!=|!==)/][left.raw=true], BinaryExpression[operator=/(==|===|!=|!==)/][right.raw=true]',
        'message': Don't compare for equality against boolean literals,
      },
    ]

  # https://github.com/benmosher/eslint-plugin-import/pull/334
  'import/no-duplicates': 2
  'import/first': 2
  'import/newline-after-import': 2
  'import/order':
    [
      2,
      {
        'newlines-between': 'always',
        'alphabetize': { 'order': 'asc' },
        'groups': ['builtin', 'external', 'internal', 'parent', 'sibling', 'index'],
      },
    ]

overrides:
  - files:
      - ./**/*{.ts,.tsx}
    rules:
      'no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }]
      'no-undef': 0
      # TypeScript declare merge
      'no-redeclare': 0
      'no-useless-constructor': 0
      'no-dupe-class-members': 0
      'no-case-declarations': 0
      'no-duplicate-imports': 0
      # TypeScript Interface and Type
      'no-use-before-define': 0
      '@typescript-eslint/adjacent-overload-signatures': 2
      '@typescript-eslint/await-thenable': 2
      '@typescript-eslint/consistent-type-assertions': 2
      '@typescript-eslint/ban-types':
        [
          'error',
          {
            'types':
              {
                'String': { 'message': 'Use string instead', 'fixWith': 'string' },
                'Number': { 'message': 'Use number instead', 'fixWith': 'number' },
                'Boolean': { 'message': 'Use boolean instead', 'fixWith': 'boolean' },
                'Function': { 'message': 'Use explicit type instead' },
              },
          },
        ]
      '@typescript-eslint/explicit-member-accessibility':
        [
          'error',
          {
            accessibility: 'explicit',
            overrides:
              {
                accessors: 'no-public',
                constructors: 'no-public',
                methods: 'no-public',
                properties: 'no-public',
                parameterProperties: 'explicit',
              },
          },
        ]
      '@typescript-eslint/method-signature-style': 2
      '@typescript-eslint/no-floating-promises': 2
      '@typescript-eslint/no-implied-eval': 2
      '@typescript-eslint/no-for-in-array': 2
      '@typescript-eslint/no-inferrable-types': 2
      '@typescript-eslint/no-invalid-void-type': 2
      '@typescript-eslint/no-misused-new': 2
      '@typescript-eslint/no-misused-promises': 2
      '@typescript-eslint/no-namespace': 2
      '@typescript-eslint/no-non-null-asserted-optional-chain': 2
      '@typescript-eslint/no-throw-literal': 2
      '@typescript-eslint/no-unnecessary-boolean-literal-compare': 2
      '@typescript-eslint/prefer-for-of': 2
      '@typescript-eslint/prefer-nullish-coalescing': 2
      '@typescript-eslint/switch-exhaustiveness-check': 2
      '@typescript-eslint/prefer-optional-chain': 2
      '@typescript-eslint/prefer-readonly': 2
      '@typescript-eslint/prefer-string-starts-ends-with': 0
      '@typescript-eslint/no-array-constructor': 2
      '@typescript-eslint/require-await': 2
      '@typescript-eslint/return-await': 2
      '@typescript-eslint/ban-ts-comment':
        [2, { 'ts-expect-error': false, 'ts-ignore': true, 'ts-nocheck': true, 'ts-check': false }]
      '@typescript-eslint/naming-convention':
        [
          2,
          {
            selector: 'memberLike',
            format: ['camelCase', 'PascalCase'],
            modifiers: ['private'],
            leadingUnderscore: 'forbid',
          },
        ]
      '@typescript-eslint/no-unused-vars':
        [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }]
      '@typescript-eslint/member-ordering':
        [
          2,
          {
            default:
              [
                'public-static-field',
                'protected-static-field',
                'private-static-field',
                'public-static-method',
                'protected-static-method',
                'private-static-method',
                'public-instance-field',
                'protected-instance-field',
                'private-instance-field',
                'public-constructor',
                'protected-constructor',
                'private-constructor',
                'public-instance-method',
                'protected-instance-method',
                'private-instance-method',
              ],
          },
        ]
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/index.js
/* tslint:disable */
/* eslint-disable */
/* prettier-ignore */

/* auto-generated by NAPI-RS */

const { existsSync, readFileSync } = require('fs')
const { join } = require('path')

const { platform, arch } = process

let nativeBinding = null
let localFileExisted = false
let loadError = null

function isMusl() {
  // For Node 10
  if (!process.report || typeof process.report.getReport !== 'function') {
    try {
      const lddPath = require('child_process').execSync('which ldd').toString().trim()
      return readFileSync(lddPath, 'utf8').includes('musl')
    } catch (e) {
      return true
    }
  } else {
    const { glibcVersionRuntime } = process.report.getReport().header
    return !glibcVersionRuntime
  }
}

switch (platform) {
  case 'android':
    switch (arch) {
      case 'arm64':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.android-arm64.node')
          } else {
            nativeBinding = require('tokenizers-android-arm64')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm-eabi.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.android-arm-eabi.node')
          } else {
            nativeBinding = require('tokenizers-android-arm-eabi')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Android ${arch}`)
    }
    break
  case 'win32':
    switch (arch) {
      case 'x64':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-x64-msvc.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.win32-x64-msvc.node')
          } else {
            nativeBinding = require('tokenizers-win32-x64-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'ia32':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-ia32-msvc.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.win32-ia32-msvc.node')
          } else {
            nativeBinding = require('tokenizers-win32-ia32-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm64':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-arm64-msvc.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.win32-arm64-msvc.node')
          } else {
            nativeBinding = require('tokenizers-win32-arm64-msvc')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Windows: ${arch}`)
    }
    break
  case 'darwin':
    localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-universal.node'))
    try {
      if (localFileExisted) {
        nativeBinding = require('./tokenizers.darwin-universal.node')
      } else {
        nativeBinding = require('tokenizers-darwin-universal')
      }
      break
    } catch {}
    switch (arch) {
      case 'x64':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-x64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.darwin-x64.node')
          } else {
            nativeBinding = require('tokenizers-darwin-x64')
          }
        } catch (e) {
          loadError = e
        }
        break
      case 'arm64':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-arm64.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.darwin-arm64.node')
          } else {
            nativeBinding = require('tokenizers-darwin-arm64')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on macOS: ${arch}`)
    }
    break
  case 'freebsd':
    if (arch !== 'x64') {
      throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
    }
    localFileExisted = existsSync(join(__dirname, 'tokenizers.freebsd-x64.node'))
    try {
      if (localFileExisted) {
        nativeBinding = require('./tokenizers.freebsd-x64.node')
      } else {
        nativeBinding = require('tokenizers-freebsd-x64')
      }
    } catch (e) {
      loadError = e
    }
    break
  case 'linux':
    switch (arch) {
      case 'x64':
        if (isMusl()) {
          localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-musl.node'))
          try {
            if (localFileExisted) {
              nativeBinding = require('./tokenizers.linux-x64-musl.node')
            } else {
              nativeBinding = require('tokenizers-linux-x64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-gnu.node'))
          try {
            if (localFileExisted) {
              nativeBinding = require('./tokenizers.linux-x64-gnu.node')
            } else {
              nativeBinding = require('tokenizers-linux-x64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 'arm64':
        if (isMusl()) {
          localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-musl.node'))
          try {
            if (localFileExisted) {
              nativeBinding = require('./tokenizers.linux-arm64-musl.node')
            } else {
              nativeBinding = require('tokenizers-linux-arm64-musl')
            }
          } catch (e) {
            loadError = e
          }
        } else {
          localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-gnu.node'))
          try {
            if (localFileExisted) {
              nativeBinding = require('./tokenizers.linux-arm64-gnu.node')
            } else {
              nativeBinding = require('tokenizers-linux-arm64-gnu')
            }
          } catch (e) {
            loadError = e
          }
        }
        break
      case 'arm':
        localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm-gnueabihf.node'))
        try {
          if (localFileExisted) {
            nativeBinding = require('./tokenizers.linux-arm-gnueabihf.node')
          } else {
            nativeBinding = require('tokenizers-linux-arm-gnueabihf')
          }
        } catch (e) {
          loadError = e
        }
        break
      default:
        throw new Error(`Unsupported architecture on Linux: ${arch}`)
    }
    break
  default:
    throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
}

if (!nativeBinding) {
  if (loadError) {
    throw loadError
  }
  throw new Error(`Failed to load native binding`)
}

const {
  Decoder,
  bpeDecoder,
  byteFallbackDecoder,
  ctcDecoder,
  fuseDecoder,
  metaspaceDecoder,
  replaceDecoder,
  sequenceDecoder,
  stripDecoder,
  wordPieceDecoder,
  Encoding,
  TruncationDirection,
  TruncationStrategy,
  Model,
  BPE,
  WordPiece,
  WordLevel,
  Unigram,
  Normalizer,
  prependNormalizer,
  stripAccentsNormalizer,
  bertNormalizer,
  nfdNormalizer,
  nfkdNormalizer,
  nfcNormalizer,
  nfkcNormalizer,
  stripNormalizer,
  sequenceNormalizer,
  lowercase,
  replace,
  nmt,
  precompiled,
  JsSplitDelimiterBehavior,
  PreTokenizer,
  byteLevelPreTokenizer,
  byteLevelAlphabet,
  whitespacePreTokenizer,
  whitespaceSplitPreTokenizer,
  bertPreTokenizer,
  metaspacePreTokenizer,
  splitPreTokenizer,
  punctuationPreTokenizer,
  sequencePreTokenizer,
  charDelimiterSplit,
  digitsPreTokenizer,
  Processor,
  bertProcessing,
  robertaProcessing,
  byteLevelProcessing,
  templateProcessing,
  sequenceProcessing,
  PaddingDirection,
  AddedToken,
  Tokenizer,
  Trainer,
  slice,
  mergeEncodings,
} = nativeBinding

module.exports.Decoder = Decoder
module.exports.bpeDecoder = bpeDecoder
module.exports.byteFallbackDecoder = byteFallbackDecoder
module.exports.ctcDecoder = ctcDecoder
module.exports.fuseDecoder = fuseDecoder
module.exports.metaspaceDecoder = metaspaceDecoder
module.exports.replaceDecoder = replaceDecoder
module.exports.sequenceDecoder = sequenceDecoder
module.exports.stripDecoder = stripDecoder
module.exports.wordPieceDecoder = wordPieceDecoder
module.exports.Encoding = Encoding
module.exports.TruncationDirection = TruncationDirection
module.exports.TruncationStrategy = TruncationStrategy
module.exports.Model = Model
module.exports.BPE = BPE
module.exports.WordPiece = WordPiece
module.exports.WordLevel = WordLevel
module.exports.Unigram = Unigram
module.exports.Normalizer = Normalizer
module.exports.prependNormalizer = prependNormalizer
module.exports.stripAccentsNormalizer = stripAccentsNormalizer
module.exports.bertNormalizer = bertNormalizer
module.exports.nfdNormalizer = nfdNormalizer
module.exports.nfkdNormalizer = nfkdNormalizer
module.exports.nfcNormalizer = nfcNormalizer
module.exports.nfkcNormalizer = nfkcNormalizer
module.exports.stripNormalizer = stripNormalizer
module.exports.sequenceNormalizer = sequenceNormalizer
module.exports.lowercase = lowercase
module.exports.replace = replace
module.exports.nmt = nmt
module.exports.precompiled = precompiled
module.exports.JsSplitDelimiterBehavior = JsSplitDelimiterBehavior
module.exports.PreTokenizer = PreTokenizer
module.exports.byteLevelPreTokenizer = byteLevelPreTokenizer
module.exports.byteLevelAlphabet = byteLevelAlphabet
module.exports.whitespacePreTokenizer = whitespacePreTokenizer
module.exports.whitespaceSplitPreTokenizer = whitespaceSplitPreTokenizer
module.exports.bertPreTokenizer = bertPreTokenizer
module.exports.metaspacePreTokenizer = metaspacePreTokenizer
module.exports.splitPreTokenizer = splitPreTokenizer
module.exports.punctuationPreTokenizer = punctuationPreTokenizer
module.exports.sequencePreTokenizer = sequencePreTokenizer
module.exports.charDelimiterSplit = charDelimiterSplit
module.exports.digitsPreTokenizer = digitsPreTokenizer
module.exports.Processor = Processor
module.exports.bertProcessing = bertProcessing
module.exports.robertaProcessing = robertaProcessing
module.exports.byteLevelProcessing = byteLevelProcessing
module.exports.templateProcessing = templateProcessing
module.exports.sequenceProcessing = sequenceProcessing
module.exports.PaddingDirection = PaddingDirection
module.exports.AddedToken = AddedToken
module.exports.Tokenizer = Tokenizer
module.exports.Trainer = Trainer
module.exports.slice = slice
module.exports.mergeEncodings = mergeEncodings
0
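The loader above always tries a local `tokenizers.<platform>-<arch>.node` file first and only then falls back to the matching `tokenizers-<platform>-<arch>` npm package, so a quick way to check that the right binary resolved on a given machine is a smoke test like the following sketch (assuming the package is installed under the name `tokenizers`):

```ts
// Minimal smoke test: if the platform-specific .node binary loaded, the
// re-exported bindings are real functions/classes rather than undefined.
const { Tokenizer, BPE } = require('tokenizers')

console.log(typeof Tokenizer) // expected: 'function'
console.log(typeof BPE) // expected: 'function'
```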
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/build.rs
extern crate napi_build;

fn main() {
    napi_build::setup();
}
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/jest.config.js
/* eslint-disable prettier/prettier */
// For a detailed explanation regarding each configuration property, visit:
// https://jestjs.io/docs/en/configuration.html

module.exports = {
  // All imported modules in your tests should be mocked automatically
  // automock: false,

  // Stop running tests after `n` failures
  // bail: 0,

  // Respect "browser" field in package.json when resolving modules
  // browser: false,

  // The directory where Jest should store its cached dependency information
  // cacheDirectory: "/private/var/folders/y_/n6h0fkqn3m57bg_ktk25j7rm0000gn/T/jest_dx",

  // Automatically clear mock calls and instances between every test
  // clearMocks: false,

  // Indicates whether the coverage information should be collected while executing the test
  // collectCoverage: false,

  // An array of glob patterns indicating a set of files for which coverage information should be collected
  // collectCoverageFrom: null,

  // The directory where Jest should output its coverage files
  // coverageDirectory: null,

  // An array of regexp pattern strings used to skip coverage collection
  // coveragePathIgnorePatterns: [
  //   "/node_modules/"
  // ],

  // A list of reporter names that Jest uses when writing coverage reports
  // coverageReporters: [
  //   "json",
  //   "text",
  //   "lcov",
  //   "clover"
  // ],

  // An object that configures minimum threshold enforcement for coverage results
  // coverageThreshold: null,

  // A path to a custom dependency extractor
  // dependencyExtractor: null,

  // Make calling deprecated APIs throw helpful error messages
  // errorOnDeprecated: false,

  // Force coverage collection from ignored files using an array of glob patterns
  // forceCoverageMatch: [],

  // A path to a module which exports an async function that is triggered once before all test suites
  // globalSetup: null,

  // A path to a module which exports an async function that is triggered once after all test suites
  // globalTeardown: null,

  // A set of global variables that need to be available in all test environments
  // globals: {},

  // The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers.
  // maxWorkers: "50%",

  // An array of directory names to be searched recursively up from the requiring module's location
  // moduleDirectories: [
  //   "node_modules"
  // ],

  // An array of file extensions your modules use
  // moduleFileExtensions: [
  //   "js",
  //   "json",
  //   "jsx",
  //   "ts",
  //   "tsx",
  //   "node"
  // ],

  // A map from regular expressions to module names that allow to stub out resources with a single module
  // moduleNameMapper: {},

  // An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader
  // modulePathIgnorePatterns: [],

  // Activates notifications for test results
  // notify: false,

  // An enum that specifies notification mode. Requires { notify: true }
  // notifyMode: "failure-change",

  // A preset that is used as a base for Jest's configuration
  preset: 'ts-jest',

  // Run tests from one or more projects
  // projects: null,

  // Use this configuration option to add custom reporters to Jest
  // reporters: undefined,

  // Automatically reset mock state between every test
  // resetMocks: false,

  // Reset the module registry before running each individual test
  // resetModules: false,

  // A path to a custom resolver
  // resolver: null,

  // Automatically restore mock state between every test
  // restoreMocks: false,

  // The root directory that Jest should scan for tests and modules within
  // rootDir: null,

  // A list of paths to directories that Jest should use to search for files in
  // roots: [
  //   "<rootDir>"
  // ],

  // Allows you to use a custom runner instead of Jest's default test runner
  // runner: "jest-runner",

  // The paths to modules that run some code to configure or set up the testing environment before each test
  // setupFiles: [],

  // A list of paths to modules that run some code to configure or set up the testing framework before each test
  // setupFilesAfterEnv: [],

  // A list of paths to snapshot serializer modules Jest should use for snapshot testing
  // snapshotSerializers: [],

  // The test environment that will be used for testing
  testEnvironment: 'node',

  // Options that will be passed to the testEnvironment
  // testEnvironmentOptions: {},

  // Adds a location field to test results
  // testLocationInResults: false,

  // The glob patterns Jest uses to detect test files
  // testMatch: [
  //   "**/__tests__/**/*.[jt]s?(x)",
  //   "**/?(*.)+(spec|test).[tj]s?(x)"
  // ],

  // An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
  testPathIgnorePatterns: ['/node_modules/', '/dist/'],

  // The regexp pattern or array of patterns that Jest uses to detect test files
  // testRegex: [],

  // This option allows the use of a custom results processor
  // testResultsProcessor: null,

  // This option allows use of a custom test runner
  // testRunner: "jasmine2",

  // This option sets the URL for the jsdom environment. It is reflected in properties such as location.href
  // testURL: "http://localhost",

  // Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout"
  // timers: "real",

  // A map from regular expressions to paths to transformers
  // transform: null,

  // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
  // transformIgnorePatterns: [
  //   "/node_modules/"
  // ],

  // An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
  // unmockedModulePathPatterns: undefined,

  // Indicates whether each individual test should be reported during the run
  // verbose: null,

  // An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode
  watchPathIgnorePatterns: ['<rootDir>/node_modules/', '<rootDir>/native/', '<rootDir>/dist/', '<rootDir>/build/'],

  // Whether to use watchman for file crawling
  // watchman: true,
}
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/tsconfig.json
{ "compilerOptions": { "target": "ES2018", "strict": true, "moduleResolution": "node", "module": "CommonJS", "noUnusedLocals": true, "noUnusedParameters": true, "esModuleInterop": true, "allowSyntheticDefaultImports": true }, "include": ["."], "exclude": ["node_modules"] }
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/README.md
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <a href="https://badge.fury.io/js/tokenizers"> <img alt="Build" src="https://badge.fury.io/js/tokenizers.svg"> </a> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> </p> <br> NodeJS implementation of today's most used tokenizers, with a focus on performance and versatility. Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation. If you are interested in the High-level design, you can go check it there. ## Main features - Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3 most common BPE versions). - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU. - Easy to use, but also extremely versatile. - Designed for research and production. - Normalization comes with alignments tracking. It's always possible to get the part of the original sentence that corresponds to a given token. - Does all the pre-processing: Truncate, Pad, add the special tokens your model needs. ## Installation ```bash npm install tokenizers@latest ``` ## Basic example ```ts import { Tokenizer } from "tokenizers"; const tokenizer = await Tokenizer.fromFile("tokenizer.json"); const wpEncoded = await tokenizer.encode("Who is John?"); console.log(wpEncoded.getLength()); console.log(wpEncoded.getTokens()); console.log(wpEncoded.getIds()); console.log(wpEncoded.getAttentionMask()); console.log(wpEncoded.getOffsets()); console.log(wpEncoded.getOverflowing()); console.log(wpEncoded.getSpecialTokensMask()); console.log(wpEncoded.getTypeIds()); console.log(wpEncoded.getWordIds()); ``` ## License [Apache License 2.0](../../LICENSE)
0
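The "alignment tracking" feature listed in the README above is directly usable from the offsets returned by `getOffsets()`. Here is a short sketch building on the basic example, under the assumption that each offset is a `[start, end)` index pair into the input string:

```ts
import { Tokenizer } from "tokenizers";

// Sketch only: maps each token back to the span of the original text it
// covers, using the same getters as the basic example above.
async function showAlignments(text: string): Promise<void> {
  const tokenizer = await Tokenizer.fromFile("tokenizer.json");
  const encoded = await tokenizer.encode(text);
  const tokens = encoded.getTokens();
  const offsets = encoded.getOffsets();
  tokens.forEach((token, i) => {
    const [start, end] = offsets[i];
    // Slicing the input with a token's offsets recovers the exact source span.
    console.log(`${token} -> "${text.slice(start, end)}"`);
  });
}

void showAlignments("Who is John?");
```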
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/.yarnrc.yml
nodeLinker: node-modules

npmAuditRegistry: 'https://registry.npmjs.org'

yarnPath: .yarn/releases/yarn-3.5.1.cjs
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/package.json
{ "name": "tokenizers", "version": "0.14.0-dev0", "repository": { "type": "git", "url": "git+https://github.com/huggingface/tokenizers.git" }, "bugs": { "url": "https://github.com/huggingface/tokenizers/issues" }, "homepage": "https://github.com/huggingface/tokenizers/tree/master/bindings/node", "author": "Anthony MOI <[email protected]>", "license": "Apache-2.0", "description": "Provides an implementation of today's most used tokenizers, with a focus on performances and versatility.", "files": [ "index.d.ts", "index.js" ], "napi": { "name": "tokenizers", "triples": { "defaults": true, "additional": [ "x86_64-unknown-linux-musl", "aarch64-unknown-linux-gnu", "i686-pc-windows-msvc", "armv7-unknown-linux-gnueabihf", "aarch64-apple-darwin", "aarch64-linux-android", "x86_64-unknown-freebsd", "aarch64-unknown-linux-musl", "aarch64-pc-windows-msvc", "armv7-linux-androideabi" ] } }, "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "scripts": { "artifacts": "napi artifacts", "bench": "node -r @swc-node/register benchmark/bench.ts", "build": "napi build --platform --release --pipe \"prettier -w\"", "build:debug": "napi build --platform --pipe \"prettier -w\"", "format": "run-p format:prettier format:rs format:toml", "format:prettier": "prettier . -w", "format:toml": "taplo format", "format:rs": "cargo fmt", "lint": "eslint . -c ./.eslintrc.yml", "prepublishOnly": "napi prepublish -t npm", "test": "jest", "version": "napi version" }, "devDependencies": { "@napi-rs/cli": "^2.14.6", "@swc-node/register": "^1.5.5", "@swc/core": "^1.3.32", "@taplo/cli": "^0.5.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.50.0", "@typescript-eslint/parser": "^5.50.0", "ava": "^5.1.1", "benny": "^3.7.1", "chalk": "^5.2.0", "eslint": "^8.33.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-prettier": "^4.2.1", "husky": "^8.0.3", "jest": "^29.5.0", "lint-staged": "^13.1.0", "npm-run-all": "^4.1.5", "prettier": "^2.8.3", "ts-jest": "^29.1.0", "typescript": "^5.0.0" }, "lint-staged": { "*.@(js|ts|tsx)": [ "eslint -c .eslintrc.yml --fix" ], "*.@(js|ts|tsx|yml|yaml|md|json)": [ "prettier --write" ], "*.toml": [ "taplo format" ] }, "ava": { "require": [ "@swc-node/register" ], "extensions": [ "ts" ], "timeout": "2m", "workerThreads": false, "environmentVariables": { "TS_NODE_PROJECT": "./tsconfig.json" } }, "prettier": { "printWidth": 120, "semi": false, "trailingComma": "all", "singleQuote": true, "arrowParens": "always" }, "packageManager": "[email protected]" }
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/.prettierignore
target
.yarn
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/node/types.ts
export type TextInputSequence = string
export type PreTokenizedInputSequence = string[]
export type InputSequence = TextInputSequence | PreTokenizedInputSequence

export type TextEncodeInput = TextInputSequence | [TextInputSequence, TextInputSequence]
export type PreTokenizedEncodeInput = PreTokenizedInputSequence | [PreTokenizedInputSequence, PreTokenizedInputSequence]
export type EncodeInput = TextEncodeInput | PreTokenizedEncodeInput
0
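To make the union shapes above concrete, here is a short sketch showing one value of each kind of input these aliases admit (the values are illustrative only):

```ts
import { EncodeInput, InputSequence, PreTokenizedEncodeInput, TextEncodeInput } from "./types";

const single: InputSequence = "Hello world"; // TextInputSequence
const preTokenized: InputSequence = ["Hello", "world"]; // PreTokenizedInputSequence
const pair: TextEncodeInput = ["Who is John?", "John is a developer."]; // sentence pair
const preTokenizedPair: PreTokenizedEncodeInput = [
  ["Who", "is", "John", "?"],
  ["John", "is", "a", "developer", "."],
];
const anyInput: EncodeInput = pair; // union of the two encode shapes
```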
linkType: hard "@jest/test-sequencer@npm:^29.6.4": version: 29.6.4 resolution: "@jest/test-sequencer@npm:29.6.4" dependencies: "@jest/test-result": ^29.6.4 graceful-fs: ^4.2.9 jest-haste-map: ^29.6.4 slash: ^3.0.0 checksum: 517fc66b74a87431a8a1429e4505d85bd09c11f2ba835e46c07c79911fbee23b89c01ec444c7c1d12d1b36f9eba60fcbbccc8e1bc1ae54a1a8b03b5f530ff81b languageName: node linkType: hard "@jest/transform@npm:^29.6.4": version: 29.6.4 resolution: "@jest/transform@npm:29.6.4" dependencies: "@babel/core": ^7.11.6 "@jest/types": ^29.6.3 "@jridgewell/trace-mapping": ^0.3.18 babel-plugin-istanbul: ^6.1.1 chalk: ^4.0.0 convert-source-map: ^2.0.0 fast-json-stable-stringify: ^2.1.0 graceful-fs: ^4.2.9 jest-haste-map: ^29.6.4 jest-regex-util: ^29.6.3 jest-util: ^29.6.3 micromatch: ^4.0.4 pirates: ^4.0.4 slash: ^3.0.0 write-file-atomic: ^4.0.2 checksum: 0341a200a0bb926fc67ab9aede91c7b4009458206495e92057e72a115c55da5fed117457e68c6ea821e24c58b55da75c6a7b0f272ed63c2693db583d689a3383 languageName: node linkType: hard "@jest/types@npm:^29.6.3": version: 29.6.3 resolution: "@jest/types@npm:29.6.3" dependencies: "@jest/schemas": ^29.6.3 "@types/istanbul-lib-coverage": ^2.0.0 "@types/istanbul-reports": ^3.0.0 "@types/node": "*" "@types/yargs": ^17.0.8 chalk: ^4.0.0 checksum: a0bcf15dbb0eca6bdd8ce61a3fb055349d40268622a7670a3b2eb3c3dbafe9eb26af59938366d520b86907b9505b0f9b29b85cec11579a9e580694b87cd90fcc languageName: node linkType: hard "@jridgewell/gen-mapping@npm:^0.3.0, @jridgewell/gen-mapping@npm:^0.3.2": version: 0.3.3 resolution: "@jridgewell/gen-mapping@npm:0.3.3" dependencies: "@jridgewell/set-array": ^1.0.1 "@jridgewell/sourcemap-codec": ^1.4.10 "@jridgewell/trace-mapping": ^0.3.9 checksum: 4a74944bd31f22354fc01c3da32e83c19e519e3bbadafa114f6da4522ea77dd0c2842607e923a591d60a76699d819a2fbb6f3552e277efdb9b58b081390b60ab languageName: node linkType: hard "@jridgewell/resolve-uri@npm:^3.1.0": version: 3.1.1 resolution: "@jridgewell/resolve-uri@npm:3.1.1" checksum: f5b441fe7900eab4f9155b3b93f9800a916257f4e8563afbcd3b5a5337b55e52bd8ae6735453b1b745457d9f6cdb16d74cd6220bbdd98cf153239e13f6cbb653 languageName: node linkType: hard "@jridgewell/set-array@npm:^1.0.1": version: 1.1.2 resolution: "@jridgewell/set-array@npm:1.1.2" checksum: 69a84d5980385f396ff60a175f7177af0b8da4ddb81824cb7016a9ef914eee9806c72b6b65942003c63f7983d4f39a5c6c27185bbca88eb4690b62075602e28e languageName: node linkType: hard "@jridgewell/sourcemap-codec@npm:^1.4.10, @jridgewell/sourcemap-codec@npm:^1.4.14": version: 1.4.15 resolution: "@jridgewell/sourcemap-codec@npm:1.4.15" checksum: b881c7e503db3fc7f3c1f35a1dd2655a188cc51a3612d76efc8a6eb74728bef5606e6758ee77423e564092b4a518aba569bbb21c9bac5ab7a35b0c6ae7e344c8 languageName: node linkType: hard "@jridgewell/trace-mapping@npm:^0.3.12, @jridgewell/trace-mapping@npm:^0.3.17, @jridgewell/trace-mapping@npm:^0.3.18, @jridgewell/trace-mapping@npm:^0.3.9": version: 0.3.19 resolution: "@jridgewell/trace-mapping@npm:0.3.19" dependencies: "@jridgewell/resolve-uri": ^3.1.0 "@jridgewell/sourcemap-codec": ^1.4.14 checksum: 956a6f0f6fec060fb48c6bf1f5ec2064e13cd38c8be3873877d4b92b4a27ba58289a34071752671262a3e3c202abcc3fa2aac64d8447b4b0fa1ba3c9047f1c20 languageName: node linkType: hard "@napi-rs/cli@npm:^2.14.6": version: 2.16.3 resolution: "@napi-rs/cli@npm:2.16.3" bin: napi: scripts/index.js checksum: 11f78b09548bc5c22df56e4fab4a87b68c2d3f2a18a55cf11e775e6a4cb5739ec0e21a14e614db2cdc2b9773cb42536c6bd00c3f85d3b461f956594f8a89ddcb languageName: node linkType: hard "@nodelib/fs.scandir@npm:2.1.5": version: 2.1.5 
resolution: "@nodelib/fs.scandir@npm:2.1.5" dependencies: "@nodelib/fs.stat": 2.0.5 run-parallel: ^1.1.9 checksum: a970d595bd23c66c880e0ef1817791432dbb7acbb8d44b7e7d0e7a22f4521260d4a83f7f9fd61d44fda4610105577f8f58a60718105fb38352baed612fd79e59 languageName: node linkType: hard "@nodelib/fs.stat@npm:2.0.5, @nodelib/fs.stat@npm:^2.0.2": version: 2.0.5 resolution: "@nodelib/fs.stat@npm:2.0.5" checksum: 012480b5ca9d97bff9261571dbbec7bbc6033f69cc92908bc1ecfad0792361a5a1994bc48674b9ef76419d056a03efadfce5a6cf6dbc0a36559571a7a483f6f0 languageName: node linkType: hard "@nodelib/fs.walk@npm:^1.2.3, @nodelib/fs.walk@npm:^1.2.8": version: 1.2.8 resolution: "@nodelib/fs.walk@npm:1.2.8" dependencies: "@nodelib/fs.scandir": 2.1.5 fastq: ^1.6.0 checksum: 190c643f156d8f8f277bf2a6078af1ffde1fd43f498f187c2db24d35b4b4b5785c02c7dc52e356497b9a1b65b13edc996de08de0b961c32844364da02986dc53 languageName: node linkType: hard "@npmcli/fs@npm:^3.1.0": version: 3.1.0 resolution: "@npmcli/fs@npm:3.1.0" dependencies: semver: ^7.3.5 checksum: a50a6818de5fc557d0b0e6f50ec780a7a02ab8ad07e5ac8b16bf519e0ad60a144ac64f97d05c443c3367235d337182e1d012bbac0eb8dbae8dc7b40b193efd0e languageName: node linkType: hard "@pkgjs/parseargs@npm:^0.11.0": version: 0.11.0 resolution: "@pkgjs/parseargs@npm:0.11.0" checksum: 6ad6a00fc4f2f2cfc6bff76fb1d88b8ee20bc0601e18ebb01b6d4be583733a860239a521a7fbca73b612e66705078809483549d2b18f370eb346c5155c8e4a0f languageName: node linkType: hard "@sinclair/typebox@npm:^0.27.8": version: 0.27.8 resolution: "@sinclair/typebox@npm:0.27.8" checksum: 00bd7362a3439021aa1ea51b0e0d0a0e8ca1351a3d54c606b115fdcc49b51b16db6e5f43b4fe7a28c38688523e22a94d49dd31168868b655f0d4d50f032d07a1 languageName: node linkType: hard "@sinonjs/commons@npm:^3.0.0": version: 3.0.0 resolution: "@sinonjs/commons@npm:3.0.0" dependencies: type-detect: 4.0.8 checksum: b4b5b73d4df4560fb8c0c7b38c7ad4aeabedd362f3373859d804c988c725889cde33550e4bcc7cd316a30f5152a2d1d43db71b6d0c38f5feef71fd8d016763f8 languageName: node linkType: hard "@sinonjs/fake-timers@npm:^10.0.2": version: 10.3.0 resolution: "@sinonjs/fake-timers@npm:10.3.0" dependencies: "@sinonjs/commons": ^3.0.0 checksum: 614d30cb4d5201550c940945d44c9e0b6d64a888ff2cd5b357f95ad6721070d6b8839cd10e15b76bf5e14af0bcc1d8f9ec00d49a46318f1f669a4bec1d7f3148 languageName: node linkType: hard "@swc-node/core@npm:^1.10.5": version: 1.10.5 resolution: "@swc-node/core@npm:1.10.5" peerDependencies: "@swc/core": ">= 1.3" checksum: 642e230388fd53842a4ad2c7daff0bf4cf1327f0002de49a5fac19d74770f5a0a5b7dd265c1e2feafa08ef22fa5dfe7957e375c03834a3059adefecfdbbff274 languageName: node linkType: hard "@swc-node/register@npm:^1.5.5": version: 1.6.7 resolution: "@swc-node/register@npm:1.6.7" dependencies: "@swc-node/core": ^1.10.5 "@swc-node/sourcemap-support": ^0.3.0 colorette: ^2.0.19 debug: ^4.3.4 pirates: ^4.0.5 tslib: ^2.5.0 peerDependencies: "@swc/core": ">= 1.3" typescript: ">= 4.3" checksum: adb78dadbe52243c167b229743d02f7e9451624ac049017d6332a1f2d7f3c51e8569b4ef743d8343087ee6dc6691a644fb112d53985aaad30dc9a537b764860d languageName: node linkType: hard "@swc-node/sourcemap-support@npm:^0.3.0": version: 0.3.0 resolution: "@swc-node/sourcemap-support@npm:0.3.0" dependencies: source-map-support: ^0.5.21 tslib: ^2.5.0 checksum: a3c837ed790238ef88682eb342b75d756eba5eb3b6cfe6cf14a597bd78dfc9a9797f1e54a4977c1297e5324fba2e33bd76ab8aa9c396ad463693de2001180c9e languageName: node linkType: hard "@swc/core-darwin-arm64@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-darwin-arm64@npm:1.3.78" conditions: os=darwin & 
cpu=arm64 languageName: node linkType: hard "@swc/core-darwin-x64@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-darwin-x64@npm:1.3.78" conditions: os=darwin & cpu=x64 languageName: node linkType: hard "@swc/core-linux-arm-gnueabihf@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-linux-arm-gnueabihf@npm:1.3.78" conditions: os=linux & cpu=arm languageName: node linkType: hard "@swc/core-linux-arm64-gnu@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-linux-arm64-gnu@npm:1.3.78" conditions: os=linux & cpu=arm64 & libc=glibc languageName: node linkType: hard "@swc/core-linux-arm64-musl@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-linux-arm64-musl@npm:1.3.78" conditions: os=linux & cpu=arm64 & libc=musl languageName: node linkType: hard "@swc/core-linux-x64-gnu@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-linux-x64-gnu@npm:1.3.78" conditions: os=linux & cpu=x64 & libc=glibc languageName: node linkType: hard "@swc/core-linux-x64-musl@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-linux-x64-musl@npm:1.3.78" conditions: os=linux & cpu=x64 & libc=musl languageName: node linkType: hard "@swc/core-win32-arm64-msvc@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-win32-arm64-msvc@npm:1.3.78" conditions: os=win32 & cpu=arm64 languageName: node linkType: hard "@swc/core-win32-ia32-msvc@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-win32-ia32-msvc@npm:1.3.78" conditions: os=win32 & cpu=ia32 languageName: node linkType: hard "@swc/core-win32-x64-msvc@npm:1.3.78": version: 1.3.78 resolution: "@swc/core-win32-x64-msvc@npm:1.3.78" conditions: os=win32 & cpu=x64 languageName: node linkType: hard "@swc/core@npm:^1.3.32": version: 1.3.78 resolution: "@swc/core@npm:1.3.78" dependencies: "@swc/core-darwin-arm64": 1.3.78 "@swc/core-darwin-x64": 1.3.78 "@swc/core-linux-arm-gnueabihf": 1.3.78 "@swc/core-linux-arm64-gnu": 1.3.78 "@swc/core-linux-arm64-musl": 1.3.78 "@swc/core-linux-x64-gnu": 1.3.78 "@swc/core-linux-x64-musl": 1.3.78 "@swc/core-win32-arm64-msvc": 1.3.78 "@swc/core-win32-ia32-msvc": 1.3.78 "@swc/core-win32-x64-msvc": 1.3.78 peerDependencies: "@swc/helpers": ^0.5.0 dependenciesMeta: "@swc/core-darwin-arm64": optional: true "@swc/core-darwin-x64": optional: true "@swc/core-linux-arm-gnueabihf": optional: true "@swc/core-linux-arm64-gnu": optional: true "@swc/core-linux-arm64-musl": optional: true "@swc/core-linux-x64-gnu": optional: true "@swc/core-linux-x64-musl": optional: true "@swc/core-win32-arm64-msvc": optional: true "@swc/core-win32-ia32-msvc": optional: true "@swc/core-win32-x64-msvc": optional: true peerDependenciesMeta: "@swc/helpers": optional: true checksum: b7494c4ca9a2e968cd00430c9dbb5cc4f0c3dd3baaf66d129185c5a8816caf6424a2da8e0f181ed94e7c0c39f76046ee995d6c5ec08198d5f5d6f60735ffb1ab languageName: node linkType: hard "@taplo/cli@npm:^0.5.2": version: 0.5.2 resolution: "@taplo/cli@npm:0.5.2" bin: taplo: dist/cli.js checksum: c2e0e584172bfee1cca6624bdb4470259179e232472fc7f4bbbd2e0127233039b9ace21a8d6b8d5081b157d9f046dc942ab27a634e23924b8c8a6096f1d04e27 languageName: node linkType: hard "@tootallnate/once@npm:2": version: 2.0.0 resolution: "@tootallnate/once@npm:2.0.0" checksum: ad87447820dd3f24825d2d947ebc03072b20a42bfc96cbafec16bff8bbda6c1a81fcb0be56d5b21968560c5359a0af4038a68ba150c3e1694fe4c109a063bed8 languageName: node linkType: hard "@types/babel__core@npm:^7.1.14": version: 7.20.1 resolution: "@types/babel__core@npm:7.20.1" dependencies: "@babel/parser": ^7.20.7 "@babel/types": ^7.20.7 "@types/babel__generator": "*" "@types/babel__template": "*" 
"@types/babel__traverse": "*" checksum: 9fcd9691a33074802d9057ff70b0e3ff3778f52470475b68698a0f6714fbe2ccb36c16b43dc924eb978cd8a81c1f845e5ff4699e7a47606043b539eb8c6331a8 languageName: node linkType: hard "@types/babel__generator@npm:*": version: 7.6.4 resolution: "@types/babel__generator@npm:7.6.4" dependencies: "@babel/types": ^7.0.0 checksum: 20effbbb5f8a3a0211e95959d06ae70c097fb6191011b73b38fe86deebefad8e09ee014605e0fd3cdaedc73d158be555866810e9166e1f09e4cfd880b874dcb0 languageName: node linkType: hard "@types/babel__template@npm:*": version: 7.4.1 resolution: "@types/babel__template@npm:7.4.1" dependencies: "@babel/parser": ^7.1.0 "@babel/types": ^7.0.0 checksum: 649fe8b42c2876be1fd28c6ed9b276f78152d5904ec290b6c861d9ef324206e0a5c242e8305c421ac52ecf6358fa7e32ab7a692f55370484825c1df29b1596ee languageName: node linkType: hard "@types/babel__traverse@npm:*, @types/babel__traverse@npm:^7.0.6": version: 7.20.1 resolution: "@types/babel__traverse@npm:7.20.1" dependencies: "@babel/types": ^7.20.7 checksum: 58341e23c649c0eba134a1682d4f20d027fad290d92e5740faa1279978f6ed476fc467ae51ce17a877e2566d805aeac64eae541168994367761ec883a4150221 languageName: node linkType: hard "@types/graceful-fs@npm:^4.1.3": version: 4.1.6 resolution: "@types/graceful-fs@npm:4.1.6" dependencies: "@types/node": "*" checksum: c3070ccdc9ca0f40df747bced1c96c71a61992d6f7c767e8fd24bb6a3c2de26e8b84135ede000b7e79db530a23e7e88dcd9db60eee6395d0f4ce1dae91369dd4 languageName: node linkType: hard "@types/istanbul-lib-coverage@npm:*, @types/istanbul-lib-coverage@npm:^2.0.0, @types/istanbul-lib-coverage@npm:^2.0.1": version: 2.0.4 resolution: "@types/istanbul-lib-coverage@npm:2.0.4" checksum: a25d7589ee65c94d31464c16b72a9dc81dfa0bea9d3e105ae03882d616e2a0712a9c101a599ec482d297c3591e16336962878cb3eb1a0a62d5b76d277a890ce7 languageName: node linkType: hard "@types/istanbul-lib-report@npm:*": version: 3.0.0 resolution: "@types/istanbul-lib-report@npm:3.0.0" dependencies: "@types/istanbul-lib-coverage": "*" checksum: 656398b62dc288e1b5226f8880af98087233cdb90100655c989a09f3052b5775bf98ba58a16c5ae642fb66c61aba402e07a9f2bff1d1569e3b306026c59f3f36 languageName: node linkType: hard "@types/istanbul-reports@npm:^3.0.0": version: 3.0.1 resolution: "@types/istanbul-reports@npm:3.0.1" dependencies: "@types/istanbul-lib-report": "*" checksum: f1ad54bc68f37f60b30c7915886b92f86b847033e597f9b34f2415acdbe5ed742fa559a0a40050d74cdba3b6a63c342cac1f3a64dba5b68b66a6941f4abd7903 languageName: node linkType: hard "@types/jest@npm:^29.5.1": version: 29.5.4 resolution: "@types/jest@npm:29.5.4" dependencies: expect: ^29.0.0 pretty-format: ^29.0.0 checksum: 38ed5942f44336452efd0f071eab60aaa57cd8d46530348d0a3aa5a691dcbf1366c4ca8f6ee8364efb45b4413bfefae443e5d4f469246a472a03b21ac11cd4ed languageName: node linkType: hard "@types/json-schema@npm:^7.0.9": version: 7.0.9 resolution: "@types/json-schema@npm:7.0.9" checksum: 259d0e25f11a21ba5c708f7ea47196bd396e379fddb79c76f9f4f62c945879dc21657904914313ec2754e443c5018ea8372362f323f30e0792897fdb2098a705 languageName: node linkType: hard "@types/json5@npm:^0.0.29": version: 0.0.29 resolution: "@types/json5@npm:0.0.29" checksum: e60b153664572116dfea673c5bda7778dbff150498f44f998e34b5886d8afc47f16799280e4b6e241c0472aef1bc36add771c569c68fc5125fc2ae519a3eb9ac languageName: node linkType: hard "@types/node@npm:*": version: 20.5.4 resolution: "@types/node@npm:20.5.4" checksum: 18de76c990e1c298183661cfc7cf16c9384531538c8090cc27bc3cab801e7c46b9f6943591ab0433955dd57961601684cc0fb19ba2deee9609e137c91afd2e25 languageName: node linkType: 
hard "@types/semver@npm:^7.3.12": version: 7.5.0 resolution: "@types/semver@npm:7.5.0" checksum: 0a64b9b9c7424d9a467658b18dd70d1d781c2d6f033096a6e05762d20ebbad23c1b69b0083b0484722aabf35640b78ccc3de26368bcae1129c87e9df028a22e2 languageName: node linkType: hard "@types/stack-utils@npm:^2.0.0": version: 2.0.1 resolution: "@types/stack-utils@npm:2.0.1" checksum: 205fdbe3326b7046d7eaf5e494d8084f2659086a266f3f9cf00bccc549c8e36e407f88168ad4383c8b07099957ad669f75f2532ed4bc70be2b037330f7bae019 languageName: node linkType: hard "@types/yargs-parser@npm:*": version: 21.0.0 resolution: "@types/yargs-parser@npm:21.0.0" checksum: b2f4c8d12ac18a567440379909127cf2cec393daffb73f246d0a25df36ea983b93b7e9e824251f959e9f928cbc7c1aab6728d0a0ff15d6145f66cec2be67d9a2 languageName: node linkType: hard "@types/yargs@npm:^17.0.8": version: 17.0.24 resolution: "@types/yargs@npm:17.0.24" dependencies: "@types/yargs-parser": "*" checksum: 5f3ac4dc4f6e211c1627340160fbe2fd247ceba002190da6cf9155af1798450501d628c9165a183f30a224fc68fa5e700490d740ff4c73e2cdef95bc4e8ba7bf languageName: node linkType: hard "@typescript-eslint/eslint-plugin@npm:^5.50.0": version: 5.62.0 resolution: "@typescript-eslint/eslint-plugin@npm:5.62.0" dependencies: "@eslint-community/regexpp": ^4.4.0 "@typescript-eslint/scope-manager": 5.62.0 "@typescript-eslint/type-utils": 5.62.0 "@typescript-eslint/utils": 5.62.0 debug: ^4.3.4 graphemer: ^1.4.0 ignore: ^5.2.0 natural-compare-lite: ^1.4.0 semver: ^7.3.7 tsutils: ^3.21.0 peerDependencies: "@typescript-eslint/parser": ^5.0.0 eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 peerDependenciesMeta: typescript: optional: true checksum: fc104b389c768f9fa7d45a48c86d5c1ad522c1d0512943e782a56b1e3096b2cbcc1eea3fcc590647bf0658eef61aac35120a9c6daf979bf629ad2956deb516a1 languageName: node linkType: hard "@typescript-eslint/parser@npm:^5.50.0": version: 5.62.0 resolution: "@typescript-eslint/parser@npm:5.62.0" dependencies: "@typescript-eslint/scope-manager": 5.62.0 "@typescript-eslint/types": 5.62.0 "@typescript-eslint/typescript-estree": 5.62.0 debug: ^4.3.4 peerDependencies: eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 peerDependenciesMeta: typescript: optional: true checksum: d168f4c7f21a7a63f47002e2d319bcbb6173597af5c60c1cf2de046b46c76b4930a093619e69faf2d30214c29ab27b54dcf1efc7046a6a6bd6f37f59a990e752 languageName: node linkType: hard "@typescript-eslint/scope-manager@npm:5.62.0": version: 5.62.0 resolution: "@typescript-eslint/scope-manager@npm:5.62.0" dependencies: "@typescript-eslint/types": 5.62.0 "@typescript-eslint/visitor-keys": 5.62.0 checksum: 6062d6b797fe1ce4d275bb0d17204c827494af59b5eaf09d8a78cdd39dadddb31074dded4297aaf5d0f839016d601032857698b0e4516c86a41207de606e9573 languageName: node linkType: hard "@typescript-eslint/type-utils@npm:5.62.0": version: 5.62.0 resolution: "@typescript-eslint/type-utils@npm:5.62.0" dependencies: "@typescript-eslint/typescript-estree": 5.62.0 "@typescript-eslint/utils": 5.62.0 debug: ^4.3.4 tsutils: ^3.21.0 peerDependencies: eslint: "*" peerDependenciesMeta: typescript: optional: true checksum: fc41eece5f315dfda14320be0da78d3a971d650ea41300be7196934b9715f3fe1120a80207551eb71d39568275dbbcf359bde540d1ca1439d8be15e9885d2739 languageName: node linkType: hard "@typescript-eslint/types@npm:5.62.0": version: 5.62.0 resolution: "@typescript-eslint/types@npm:5.62.0" checksum: 48c87117383d1864766486f24de34086155532b070f6264e09d0e6139449270f8a9559cfef3c56d16e3bcfb52d83d42105d61b36743626399c7c2b5e0ac3b670 languageName: node linkType: hard "@typescript-eslint/typescript-estree@npm:5.62.0": version: 5.62.0 
resolution: "@typescript-eslint/typescript-estree@npm:5.62.0" dependencies: "@typescript-eslint/types": 5.62.0 "@typescript-eslint/visitor-keys": 5.62.0 debug: ^4.3.4 globby: ^11.1.0 is-glob: ^4.0.3 semver: ^7.3.7 tsutils: ^3.21.0 peerDependenciesMeta: typescript: optional: true checksum: 3624520abb5807ed8f57b1197e61c7b1ed770c56dfcaca66372d584ff50175225798bccb701f7ef129d62c5989070e1ee3a0aa2d84e56d9524dcf011a2bb1a52 languageName: node linkType: hard "@typescript-eslint/utils@npm:5.62.0": version: 5.62.0 resolution: "@typescript-eslint/utils@npm:5.62.0" dependencies: "@eslint-community/eslint-utils": ^4.2.0 "@types/json-schema": ^7.0.9 "@types/semver": ^7.3.12 "@typescript-eslint/scope-manager": 5.62.0 "@typescript-eslint/types": 5.62.0 "@typescript-eslint/typescript-estree": 5.62.0 eslint-scope: ^5.1.1 semver: ^7.3.7 peerDependencies: eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 checksum: ee9398c8c5db6d1da09463ca7bf36ed134361e20131ea354b2da16a5fdb6df9ba70c62a388d19f6eebb421af1786dbbd79ba95ddd6ab287324fc171c3e28d931 languageName: node linkType: hard "@typescript-eslint/visitor-keys@npm:5.62.0": version: 5.62.0 resolution: "@typescript-eslint/visitor-keys@npm:5.62.0" dependencies: "@typescript-eslint/types": 5.62.0 eslint-visitor-keys: ^3.3.0 checksum: 976b05d103fe8335bef5c93ad3f76d781e3ce50329c0243ee0f00c0fcfb186c81df50e64bfdd34970148113f8ade90887f53e3c4938183afba830b4ba8e30a35 languageName: node linkType: hard "abbrev@npm:^1.0.0": version: 1.1.1 resolution: "abbrev@npm:1.1.1" checksum: a4a97ec07d7ea112c517036882b2ac22f3109b7b19077dc656316d07d308438aac28e4d9746dc4d84bf6b1e75b4a7b0a5f3cb30592419f128ca9a8cee3bcfa17 languageName: node linkType: hard "acorn-jsx@npm:^5.3.2": version: 5.3.2 resolution: "acorn-jsx@npm:5.3.2" peerDependencies: acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 checksum: c3d3b2a89c9a056b205b69530a37b972b404ee46ec8e5b341666f9513d3163e2a4f214a71f4dfc7370f5a9c07472d2fd1c11c91c3f03d093e37637d95da98950 languageName: node linkType: hard "acorn-walk@npm:^8.2.0": version: 8.2.0 resolution: "acorn-walk@npm:8.2.0" checksum: 1715e76c01dd7b2d4ca472f9c58968516a4899378a63ad5b6c2d668bba8da21a71976c14ec5f5b75f887b6317c4ae0b897ab141c831d741dc76024d8745f1ad1 languageName: node linkType: hard "acorn@npm:^8.8.2, acorn@npm:^8.9.0": version: 8.10.0 resolution: "acorn@npm:8.10.0" bin: acorn: bin/acorn checksum: 538ba38af0cc9e5ef983aee196c4b8b4d87c0c94532334fa7e065b2c8a1f85863467bb774231aae91613fcda5e68740c15d97b1967ae3394d20faddddd8af61d languageName: node linkType: hard "agent-base@npm:6, agent-base@npm:^6.0.2": version: 6.0.2 resolution: "agent-base@npm:6.0.2" dependencies: debug: 4 checksum: f52b6872cc96fd5f622071b71ef200e01c7c4c454ee68bc9accca90c98cfb39f2810e3e9aa330435835eedc8c23f4f8a15267f67c6e245d2b33757575bdac49d languageName: node linkType: hard "agentkeepalive@npm:^4.2.1": version: 4.5.0 resolution: "agentkeepalive@npm:4.5.0" dependencies: humanize-ms: ^1.2.1 checksum: 13278cd5b125e51eddd5079f04d6fe0914ac1b8b91c1f3db2c1822f99ac1a7457869068997784342fe455d59daaff22e14fb7b8c3da4e741896e7e31faf92481 languageName: node linkType: hard "aggregate-error@npm:^3.0.0": version: 3.1.0 resolution: "aggregate-error@npm:3.1.0" dependencies: clean-stack: ^2.0.0 indent-string: ^4.0.0 checksum: 1101a33f21baa27a2fa8e04b698271e64616b886795fd43c31068c07533c7b3facfcaf4e9e0cab3624bd88f729a592f1c901a1a229c9e490eafce411a8644b79 languageName: node linkType: hard "aggregate-error@npm:^4.0.0": version: 4.0.1 resolution: "aggregate-error@npm:4.0.1" dependencies: clean-stack: ^4.0.0 indent-string: ^5.0.0 checksum: 
bb3ffdfd13447800fff237c2cba752c59868ee669104bb995dfbbe0b8320e967d679e683dabb640feb32e4882d60258165cde0baafc4cd467cc7d275a13ad6b5 languageName: node linkType: hard "ajv@npm:^6.12.4": version: 6.12.6 resolution: "ajv@npm:6.12.6" dependencies: fast-deep-equal: ^3.1.1 fast-json-stable-stringify: ^2.0.0 json-schema-traverse: ^0.4.1 uri-js: ^4.2.2 checksum: 874972efe5c4202ab0a68379481fbd3d1b5d0a7bd6d3cc21d40d3536ebff3352a2a1fabb632d4fd2cc7fe4cbdcd5ed6782084c9bbf7f32a1536d18f9da5007d4 languageName: node linkType: hard "ansi-escapes@npm:^4.2.1, ansi-escapes@npm:^4.3.0": version: 4.3.2 resolution: "ansi-escapes@npm:4.3.2" dependencies: type-fest: ^0.21.3 checksum: 93111c42189c0a6bed9cdb4d7f2829548e943827ee8479c74d6e0b22ee127b2a21d3f8b5ca57723b8ef78ce011fbfc2784350eb2bde3ccfccf2f575fa8489815 languageName: node linkType: hard "ansi-escapes@npm:^5.0.0": version: 5.0.0 resolution: "ansi-escapes@npm:5.0.0" dependencies: type-fest: ^1.0.2 checksum: d4b5eb8207df38367945f5dd2ef41e08c28edc192dc766ef18af6b53736682f49d8bfcfa4e4d6ecbc2e2f97c258fda084fb29a9e43b69170b71090f771afccac languageName: node linkType: hard "ansi-regex@npm:^5.0.1": version: 5.0.1 resolution: "ansi-regex@npm:5.0.1" checksum: 2aa4bb54caf2d622f1afdad09441695af2a83aa3fe8b8afa581d205e57ed4261c183c4d3877cee25794443fde5876417d859c108078ab788d6af7e4fe52eb66b languageName: node linkType: hard "ansi-regex@npm:^6.0.1": version: 6.0.1 resolution: "ansi-regex@npm:6.0.1" checksum: 1ff8b7667cded1de4fa2c9ae283e979fc87036864317da86a2e546725f96406746411d0d85e87a2d12fa5abd715d90006de7fa4fa0477c92321ad3b4c7d4e169 languageName: node linkType: hard "ansi-styles@npm:^3.2.1": version: 3.2.1 resolution: "ansi-styles@npm:3.2.1" dependencies: color-convert: ^1.9.0 checksum: d85ade01c10e5dd77b6c89f34ed7531da5830d2cb5882c645f330079975b716438cd7ebb81d0d6e6b4f9c577f19ae41ab55f07f19786b02f9dfd9e0377395665 languageName: node linkType: hard "ansi-styles@npm:^4.0.0, ansi-styles@npm:^4.1.0": version: 4.3.0 resolution: "ansi-styles@npm:4.3.0" dependencies: color-convert: ^2.0.1 checksum: 513b44c3b2105dd14cc42a19271e80f386466c4be574bccf60b627432f9198571ebf4ab1e4c3ba17347658f4ee1711c163d574248c0c1cdc2d5917a0ad582ec4 languageName: node linkType: hard "ansi-styles@npm:^5.0.0": version: 5.2.0 resolution: "ansi-styles@npm:5.2.0" checksum: d7f4e97ce0623aea6bc0d90dcd28881ee04cba06c570b97fd3391bd7a268eedfd9d5e2dd4fdcbdd82b8105df5faf6f24aaedc08eaf3da898e702db5948f63469 languageName: node linkType: hard "ansi-styles@npm:^6.0.0, ansi-styles@npm:^6.1.0, ansi-styles@npm:^6.2.1": version: 6.2.1 resolution: "ansi-styles@npm:6.2.1" checksum: ef940f2f0ced1a6347398da88a91da7930c33ecac3c77b72c5905f8b8fe402c52e6fde304ff5347f616e27a742da3f1dc76de98f6866c69251ad0b07a66776d9 languageName: node linkType: hard "anymatch@npm:^3.0.3, anymatch@npm:~3.1.2": version: 3.1.3 resolution: "anymatch@npm:3.1.3" dependencies: normalize-path: ^3.0.0 picomatch: ^2.0.4 checksum: 3e044fd6d1d26545f235a9fe4d7a534e2029d8e59fa7fd9f2a6eb21230f6b5380ea1eaf55136e60cbf8e613544b3b766e7a6fa2102e2a3a117505466e3025dc2 languageName: node linkType: hard "aproba@npm:^1.0.3 || ^2.0.0": version: 2.0.0 resolution: "aproba@npm:2.0.0" checksum: 5615cadcfb45289eea63f8afd064ab656006361020e1735112e346593856f87435e02d8dcc7ff0d11928bc7d425f27bc7c2a84f6c0b35ab0ff659c814c138a24 languageName: node linkType: hard "are-we-there-yet@npm:^3.0.0": version: 3.0.1 resolution: "are-we-there-yet@npm:3.0.1" dependencies: delegates: ^1.0.0 readable-stream: ^3.6.0 checksum: 
52590c24860fa7173bedeb69a4c05fb573473e860197f618b9a28432ee4379049336727ae3a1f9c4cb083114601c1140cee578376164d0e651217a9843f9fe83 languageName: node linkType: hard "argparse@npm:^1.0.7": version: 1.0.10 resolution: "argparse@npm:1.0.10" dependencies: sprintf-js: ~1.0.2 checksum: 7ca6e45583a28de7258e39e13d81e925cfa25d7d4aacbf806a382d3c02fcb13403a07fb8aeef949f10a7cfe4a62da0e2e807b348a5980554cc28ee573ef95945 languageName: node linkType: hard "argparse@npm:^2.0.1": version: 2.0.1 resolution: "argparse@npm:2.0.1" checksum: 83644b56493e89a254bae05702abf3a1101b4fa4d0ca31df1c9985275a5a5bd47b3c27b7fa0b71098d41114d8ca000e6ed90cad764b306f8a503665e4d517ced languageName: node linkType: hard "array-buffer-byte-length@npm:^1.0.0": version: 1.0.0 resolution: "array-buffer-byte-length@npm:1.0.0" dependencies: call-bind: ^1.0.2 is-array-buffer: ^3.0.1 checksum: 044e101ce150f4804ad19c51d6c4d4cfa505c5b2577bd179256e4aa3f3f6a0a5e9874c78cd428ee566ac574c8a04d7ce21af9fe52e844abfdccb82b33035a7c3 languageName: node linkType: hard "array-find-index@npm:^1.0.1": version: 1.0.2 resolution: "array-find-index@npm:1.0.2" checksum: aac128bf369e1ac6c06ff0bb330788371c0e256f71279fb92d745e26fb4b9db8920e485b4ec25e841c93146bf71a34dcdbcefa115e7e0f96927a214d237b7081 languageName: node linkType: hard "array-includes@npm:^3.1.6": version: 3.1.6 resolution: "array-includes@npm:3.1.6" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 es-abstract: ^1.20.4 get-intrinsic: ^1.1.3 is-string: ^1.0.7 checksum: f22f8cd8ba8a6448d91eebdc69f04e4e55085d09232b5216ee2d476dab3ef59984e8d1889e662c6a0ed939dcb1b57fd05b2c0209c3370942fc41b752c82a2ca5 languageName: node linkType: hard "array-union@npm:^2.1.0": version: 2.1.0 resolution: "array-union@npm:2.1.0" checksum: 5bee12395cba82da674931df6d0fea23c4aa4660cb3b338ced9f828782a65caa232573e6bf3968f23e0c5eb301764a382cef2f128b170a9dc59de0e36c39f98d languageName: node linkType: hard "array.prototype.findlastindex@npm:^1.2.2": version: 1.2.2 resolution: "array.prototype.findlastindex@npm:1.2.2" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 es-abstract: ^1.20.4 es-shim-unscopables: ^1.0.0 get-intrinsic: ^1.1.3 checksum: 8a166359f69a2a751c843f26b9c8cd03d0dc396a92cdcb85f4126b5f1cecdae5b2c0c616a71ea8aff026bde68165b44950b3664404bb73db0673e288495ba264 languageName: node linkType: hard "array.prototype.flat@npm:^1.3.1": version: 1.3.1 resolution: "array.prototype.flat@npm:1.3.1" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 es-abstract: ^1.20.4 es-shim-unscopables: ^1.0.0 checksum: 5a8415949df79bf6e01afd7e8839bbde5a3581300e8ad5d8449dea52639e9e59b26a467665622783697917b43bf39940a6e621877c7dd9b3d1c1f97484b9b88b languageName: node linkType: hard "array.prototype.flatmap@npm:^1.3.1": version: 1.3.1 resolution: "array.prototype.flatmap@npm:1.3.1" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 es-abstract: ^1.20.4 es-shim-unscopables: ^1.0.0 checksum: 8c1c43a4995f12cf12523436da28515184c753807b3f0bc2ca6c075f71c470b099e2090cc67dba8e5280958fea401c1d0c59e1db0143272aef6cd1103921a987 languageName: node linkType: hard "arraybuffer.prototype.slice@npm:^1.0.1": version: 1.0.1 resolution: "arraybuffer.prototype.slice@npm:1.0.1" dependencies: array-buffer-byte-length: ^1.0.0 call-bind: ^1.0.2 define-properties: ^1.2.0 get-intrinsic: ^1.2.1 is-array-buffer: ^3.0.2 is-shared-array-buffer: ^1.0.2 checksum: e3e9b2a3e988ebfeddce4c7e8f69df730c9e48cb04b0d40ff0874ce3d86b3d1339dd520ffde5e39c02610bc172ecfbd4bc93324b1cabd9554c44a56b131ce0ce languageName: node linkType: hard "arrgv@npm:^1.0.2": version: 
1.0.2 resolution: "arrgv@npm:1.0.2" checksum: 470bbb406ea3b34810dd8b03c0b33282617a42d9fce0ab45d58596efefd042fc548eda49161fa8e3f607cbe9df90e7a67003a09043ab9081eff70f97c63dd0e2 languageName: node linkType: hard "arrify@npm:^3.0.0": version: 3.0.0 resolution: "arrify@npm:3.0.0" checksum: d6c6f3dad9571234f320e130d57fddb2cc283c87f2ac7df6c7005dffc5161b7bb9376f4be655ed257050330336e84afc4f3020d77696ad231ff580a94ae5aba6 languageName: node linkType: hard "astral-regex@npm:^2.0.0": version: 2.0.0 resolution: "astral-regex@npm:2.0.0" checksum: 876231688c66400473ba505731df37ea436e574dd524520294cc3bbc54ea40334865e01fa0d074d74d036ee874ee7e62f486ea38bc421ee8e6a871c06f011766 languageName: node linkType: hard "ava@npm:^5.1.1": version: 5.3.1 resolution: "ava@npm:5.3.1" dependencies: acorn: ^8.8.2 acorn-walk: ^8.2.0 ansi-styles: ^6.2.1 arrgv: ^1.0.2 arrify: ^3.0.0 callsites: ^4.0.0 cbor: ^8.1.0 chalk: ^5.2.0 chokidar: ^3.5.3 chunkd: ^2.0.1 ci-info: ^3.8.0 ci-parallel-vars: ^1.0.1 clean-yaml-object: ^0.1.0 cli-truncate: ^3.1.0 code-excerpt: ^4.0.0 common-path-prefix: ^3.0.0 concordance: ^5.0.4 currently-unhandled: ^0.4.1 debug: ^4.3.4 emittery: ^1.0.1 figures: ^5.0.0 globby: ^13.1.4 ignore-by-default: ^2.1.0 indent-string: ^5.0.0 is-error: ^2.2.2 is-plain-object: ^5.0.0 is-promise: ^4.0.0 matcher: ^5.0.0 mem: ^9.0.2 ms: ^2.1.3 p-event: ^5.0.1 p-map: ^5.5.0 picomatch: ^2.3.1 pkg-conf: ^4.0.0 plur: ^5.1.0 pretty-ms: ^8.0.0 resolve-cwd: ^3.0.0 stack-utils: ^2.0.6 strip-ansi: ^7.0.1 supertap: ^3.0.1 temp-dir: ^3.0.0 write-file-atomic: ^5.0.1 yargs: ^17.7.2 peerDependencies: "@ava/typescript": "*" peerDependenciesMeta: "@ava/typescript": optional: true bin: ava: entrypoints/cli.mjs checksum: 126a5932baef74eccd8bec992bd522e25c05b6ee4985dde87c20cece76c2377f0bf9448f242f3f9cd2abbf7a5ac932fe4e4abde2a23792d6271a6088e5a1984e languageName: node linkType: hard "available-typed-arrays@npm:^1.0.5": version: 1.0.5 resolution: "available-typed-arrays@npm:1.0.5" checksum: 20eb47b3cefd7db027b9bbb993c658abd36d4edd3fe1060e83699a03ee275b0c9b216cc076ff3f2db29073225fb70e7613987af14269ac1fe2a19803ccc97f1a languageName: node linkType: hard "babel-jest@npm:^29.6.4": version: 29.6.4 resolution: "babel-jest@npm:29.6.4" dependencies: "@jest/transform": ^29.6.4 "@types/babel__core": ^7.1.14 babel-plugin-istanbul: ^6.1.1 babel-preset-jest: ^29.6.3 chalk: ^4.0.0 graceful-fs: ^4.2.9 slash: ^3.0.0 peerDependencies: "@babel/core": ^7.8.0 checksum: c574f1805ab6b51a7d0f5a028aad19eec4634be81e66e6f4631b79b34d8ea05dfb53629f3686c77345163872730aa0408c9e5937ed85f846984228f7ab5e5d96 languageName: node linkType: hard "babel-plugin-istanbul@npm:^6.1.1": version: 6.1.1 resolution: "babel-plugin-istanbul@npm:6.1.1" dependencies: "@babel/helper-plugin-utils": ^7.0.0 "@istanbuljs/load-nyc-config": ^1.0.0 "@istanbuljs/schema": ^0.1.2 istanbul-lib-instrument: ^5.0.4 test-exclude: ^6.0.0 checksum: cb4fd95738219f232f0aece1116628cccff16db891713c4ccb501cddbbf9272951a5df81f2f2658dfdf4b3e7b236a9d5cbcf04d5d8c07dd5077297339598061a languageName: node linkType: hard "babel-plugin-jest-hoist@npm:^29.6.3": version: 29.6.3 resolution: "babel-plugin-jest-hoist@npm:29.6.3" dependencies: "@babel/template": ^7.3.3 "@babel/types": ^7.3.3 "@types/babel__core": ^7.1.14 "@types/babel__traverse": ^7.0.6 checksum: 51250f22815a7318f17214a9d44650ba89551e6d4f47a2dc259128428324b52f5a73979d010cefd921fd5a720d8c1d55ad74ff601cd94c7bd44d5f6292fde2d1 languageName: node linkType: hard "babel-preset-current-node-syntax@npm:^1.0.0": version: 1.0.1 resolution: 
"babel-preset-current-node-syntax@npm:1.0.1" dependencies: "@babel/plugin-syntax-async-generators": ^7.8.4 "@babel/plugin-syntax-bigint": ^7.8.3 "@babel/plugin-syntax-class-properties": ^7.8.3 "@babel/plugin-syntax-import-meta": ^7.8.3 "@babel/plugin-syntax-json-strings": ^7.8.3 "@babel/plugin-syntax-logical-assignment-operators": ^7.8.3 "@babel/plugin-syntax-nullish-coalescing-operator": ^7.8.3 "@babel/plugin-syntax-numeric-separator": ^7.8.3 "@babel/plugin-syntax-object-rest-spread": ^7.8.3 "@babel/plugin-syntax-optional-catch-binding": ^7.8.3 "@babel/plugin-syntax-optional-chaining": ^7.8.3 "@babel/plugin-syntax-top-level-await": ^7.8.3 peerDependencies: "@babel/core": ^7.0.0 checksum: d118c2742498c5492c095bc8541f4076b253e705b5f1ad9a2e7d302d81a84866f0070346662355c8e25fc02caa28dc2da8d69bcd67794a0d60c4d6fab6913cc8 languageName: node linkType: hard "babel-preset-jest@npm:^29.6.3": version: 29.6.3 resolution: "babel-preset-jest@npm:29.6.3" dependencies: babel-plugin-jest-hoist: ^29.6.3 babel-preset-current-node-syntax: ^1.0.0 peerDependencies: "@babel/core": ^7.0.0 checksum: aa4ff2a8a728d9d698ed521e3461a109a1e66202b13d3494e41eea30729a5e7cc03b3a2d56c594423a135429c37bf63a9fa8b0b9ce275298be3095a88c69f6fb languageName: node linkType: hard "balanced-match@npm:^1.0.0": version: 1.0.0 resolution: "balanced-match@npm:1.0.0" checksum: 9b67bfe558772f40cf743a3469b48b286aecec2ea9fe80c48d74845e53aab1cef524fafedf123a63019b49ac397760573ef5f173f539423061f7217cbb5fbd40 languageName: node linkType: hard "benchmark@npm:^2.1.4": version: 2.1.4 resolution: "benchmark@npm:2.1.4" dependencies: lodash: ^4.17.4 platform: ^1.3.3 checksum: aa466561d4f2b0a2419a3069b8f90fd35ffacf26849697eea9de525ecfbd10b44da11070cc51c88d772076db8cb2415641b493de7d6c024fdf8551019c6fcf1c languageName: node linkType: hard "benny@npm:^3.7.1": version: 3.7.1 resolution: "benny@npm:3.7.1" dependencies: "@arrows/composition": ^1.0.0 "@arrows/dispatch": ^1.0.2 "@arrows/multimethod": ^1.1.6 benchmark: ^2.1.4 common-tags: ^1.8.0 fs-extra: ^10.0.0 json2csv: ^5.0.6 kleur: ^4.1.4 log-update: ^4.0.0 checksum: 8dcca91afb6e97b986a16fc73a2a12b2d51c306dc1e9fca6ace988b3ca26405dffcb85309083a449d27cfab440d8164b5cff3a0deba034879da401305412af34 languageName: node linkType: hard "binary-extensions@npm:^2.0.0": version: 2.2.0 resolution: "binary-extensions@npm:2.2.0" checksum: ccd267956c58d2315f5d3ea6757cf09863c5fc703e50fbeb13a7dc849b812ef76e3cf9ca8f35a0c48498776a7478d7b4a0418e1e2b8cb9cb9731f2922aaad7f8 languageName: node linkType: hard "blueimp-md5@npm:^2.10.0": version: 2.19.0 resolution: "blueimp-md5@npm:2.19.0" checksum: 28095dcbd2c67152a2938006e8d7c74c3406ba6556071298f872505432feb2c13241b0476644160ee0a5220383ba94cb8ccdac0053b51f68d168728f9c382530 languageName: node linkType: hard "brace-expansion@npm:^1.1.7": version: 1.1.11 resolution: "brace-expansion@npm:1.1.11" dependencies: balanced-match: ^1.0.0 concat-map: 0.0.1 checksum: faf34a7bb0c3fcf4b59c7808bc5d2a96a40988addf2e7e09dfbb67a2251800e0d14cd2bfc1aa79174f2f5095c54ff27f46fb1289fe2d77dac755b5eb3434cc07 languageName: node linkType: hard "brace-expansion@npm:^2.0.1": version: 2.0.1 resolution: "brace-expansion@npm:2.0.1" dependencies: balanced-match: ^1.0.0 checksum: a61e7cd2e8a8505e9f0036b3b6108ba5e926b4b55089eeb5550cd04a471fe216c96d4fe7e4c7f995c728c554ae20ddfc4244cad10aef255e72b62930afd233d1 languageName: node linkType: hard "braces@npm:^3.0.2, braces@npm:~3.0.2": version: 3.0.2 resolution: "braces@npm:3.0.2" dependencies: fill-range: ^7.0.1 checksum: 
e2a8e769a863f3d4ee887b5fe21f63193a891c68b612ddb4b68d82d1b5f3ff9073af066c343e9867a393fe4c2555dcb33e89b937195feb9c1613d259edfcd459 languageName: node linkType: hard "browserslist@npm:^4.21.9": version: 4.21.10 resolution: "browserslist@npm:4.21.10" dependencies: caniuse-lite: ^1.0.30001517 electron-to-chromium: ^1.4.477 node-releases: ^2.0.13 update-browserslist-db: ^1.0.11 bin: browserslist: cli.js checksum: 1e27c0f111a35d1dd0e8fc2c61781b0daefabc2c9471b0b10537ce54843014bceb2a1ce4571af1a82b2bf1e6e6e05d38865916689a158f03bc2c7a4ec2577db8 languageName: node linkType: hard "bs-logger@npm:0.x": version: 0.2.6 resolution: "bs-logger@npm:0.2.6" dependencies: fast-json-stable-stringify: 2.x checksum: d34bdaf68c64bd099ab97c3ea608c9ae7d3f5faa1178b3f3f345acd94e852e608b2d4f9103fb2e503f5e69780e98293df41691b84be909b41cf5045374d54606 languageName: node linkType: hard "bser@npm:2.1.1": version: 2.1.1 resolution: "bser@npm:2.1.1" dependencies: node-int64: ^0.4.0 checksum: 9ba4dc58ce86300c862bffc3ae91f00b2a03b01ee07f3564beeeaf82aa243b8b03ba53f123b0b842c190d4399b94697970c8e7cf7b1ea44b61aa28c3526a4449 languageName: node linkType: hard "buffer-from@npm:^1.0.0": version: 1.1.2 resolution: "buffer-from@npm:1.1.2" checksum: 0448524a562b37d4d7ed9efd91685a5b77a50672c556ea254ac9a6d30e3403a517d8981f10e565db24e8339413b43c97ca2951f10e399c6125a0d8911f5679bb languageName: node linkType: hard "cacache@npm:^17.0.0": version: 17.1.4 resolution: "cacache@npm:17.1.4" dependencies: "@npmcli/fs": ^3.1.0 fs-minipass: ^3.0.0 glob: ^10.2.2 lru-cache: ^7.7.1 minipass: ^7.0.3 minipass-collect: ^1.0.2 minipass-flush: ^1.0.5 minipass-pipeline: ^1.2.4 p-map: ^4.0.0 ssri: ^10.0.0 tar: ^6.1.11 unique-filename: ^3.0.0 checksum: b7751df756656954a51201335addced8f63fc53266fa56392c9f5ae83c8d27debffb4458ac2d168a744a4517ec3f2163af05c20097f93d17bdc2dc8a385e14a6 languageName: node linkType: hard "call-bind@npm:^1.0.0, call-bind@npm:^1.0.2": version: 1.0.2 resolution: "call-bind@npm:1.0.2" dependencies: function-bind: ^1.1.1 get-intrinsic: ^1.0.2 checksum: f8e31de9d19988a4b80f3e704788c4a2d6b6f3d17cfec4f57dc29ced450c53a49270dc66bf0fbd693329ee948dd33e6c90a329519aef17474a4d961e8d6426b0 languageName: node linkType: hard "callsites@npm:^3.0.0": version: 3.1.0 resolution: "callsites@npm:3.1.0" checksum: 072d17b6abb459c2ba96598918b55868af677154bec7e73d222ef95a8fdb9bbf7dae96a8421085cdad8cd190d86653b5b6dc55a4484f2e5b2e27d5e0c3fc15b3 languageName: node linkType: hard "callsites@npm:^4.0.0": version: 4.1.0 resolution: "callsites@npm:4.1.0" checksum: 4ad31de7b7615fa25bdab9c2373865209d2d5190f895cdf2e2f518bd1dafa7ebcda2e6e9cc9640f2dfde6b3893d82fa4359a78ffc27baad2503227553c6882fa languageName: node linkType: hard "camelcase@npm:^5.3.1": version: 5.3.1 resolution: "camelcase@npm:5.3.1" checksum: e6effce26b9404e3c0f301498184f243811c30dfe6d0b9051863bd8e4034d09c8c2923794f280d6827e5aa055f6c434115ff97864a16a963366fb35fd673024b languageName: node linkType: hard "camelcase@npm:^6.2.0": version: 6.3.0 resolution: "camelcase@npm:6.3.0" checksum: 8c96818a9076434998511251dcb2761a94817ea17dbdc37f47ac080bd088fc62c7369429a19e2178b993497132c8cbcf5cc1f44ba963e76782ba469c0474938d languageName: node linkType: hard "caniuse-lite@npm:^1.0.30001517": version: 1.0.30001522 resolution: "caniuse-lite@npm:1.0.30001522" checksum: 56e3551c02ae595085114073cf242f7d9d54d32255c80893ca9098a44f44fc6eef353936f234f31c7f4cb894dd2b6c9c4626e30649ee29e04d70aa127eeefeb0 languageName: node linkType: hard "cbor@npm:^8.1.0": version: 8.1.0 resolution: "cbor@npm:8.1.0" dependencies: nofilter: ^3.1.0 
checksum: a90338435dc7b45cc01461af979e3bb6ddd4f2a08584c437586039cd5f2235014c06e49d664295debbfb3514d87b2f06728092ab6aa6175e2e85e9cd7dc0c1fd languageName: node linkType: hard "chalk@npm:5.3.0, chalk@npm:^5.2.0": version: 5.3.0 resolution: "chalk@npm:5.3.0" checksum: 623922e077b7d1e9dedaea6f8b9e9352921f8ae3afe739132e0e00c275971bdd331268183b2628cf4ab1727c45ea1f28d7e24ac23ce1db1eb653c414ca8a5a80 languageName: node linkType: hard "chalk@npm:^2.4.1, chalk@npm:^2.4.2": version: 2.4.2 resolution: "chalk@npm:2.4.2" dependencies: ansi-styles: ^3.2.1 escape-string-regexp: ^1.0.5 supports-color: ^5.3.0 checksum: ec3661d38fe77f681200f878edbd9448821924e0f93a9cefc0e26a33b145f1027a2084bf19967160d11e1f03bfe4eaffcabf5493b89098b2782c3fe0b03d80c2 languageName: node linkType: hard "chalk@npm:^4.0.0": version: 4.1.2 resolution: "chalk@npm:4.1.2" dependencies: ansi-styles: ^4.1.0 supports-color: ^7.1.0 checksum: fe75c9d5c76a7a98d45495b91b2172fa3b7a09e0cc9370e5c8feb1c567b85c4288e2b3fded7cfdd7359ac28d6b3844feb8b82b8686842e93d23c827c417e83fc languageName: node linkType: hard "char-regex@npm:^1.0.2": version: 1.0.2 resolution: "char-regex@npm:1.0.2" checksum: b563e4b6039b15213114626621e7a3d12f31008bdce20f9c741d69987f62aeaace7ec30f6018890ad77b2e9b4d95324c9f5acfca58a9441e3b1dcdd1e2525d17 languageName: node linkType: hard "chokidar@npm:^3.5.3": version: 3.5.3 resolution: "chokidar@npm:3.5.3" dependencies: anymatch: ~3.1.2 braces: ~3.0.2 fsevents: ~2.3.2 glob-parent: ~5.1.2 is-binary-path: ~2.1.0 is-glob: ~4.0.1 normalize-path: ~3.0.0 readdirp: ~3.6.0 dependenciesMeta: fsevents: optional: true checksum: b49fcde40176ba007ff361b198a2d35df60d9bb2a5aab228279eb810feae9294a6b4649ab15981304447afe1e6ffbf4788ad5db77235dc770ab777c6e771980c languageName: node linkType: hard "chownr@npm:^2.0.0": version: 2.0.0 resolution: "chownr@npm:2.0.0" checksum: c57cf9dd0791e2f18a5ee9c1a299ae6e801ff58fee96dc8bfd0dcb4738a6ce58dd252a3605b1c93c6418fe4f9d5093b28ffbf4d66648cb2a9c67eaef9679be2f languageName: node linkType: hard "chunkd@npm:^2.0.1": version: 2.0.1 resolution: "chunkd@npm:2.0.1" checksum: bab8cc08c752a3648984385dc6f61d751e89dbeef648d22a3b661e1d470eaa0f5182f0b4303710f13ae83d2f85144f8eb2dde7a975861d9021b5c56b881f457b languageName: node linkType: hard "ci-info@npm:^3.2.0, ci-info@npm:^3.8.0": version: 3.8.0 resolution: "ci-info@npm:3.8.0" checksum: d0a4d3160497cae54294974a7246202244fff031b0a6ea20dd57b10ec510aa17399c41a1b0982142c105f3255aff2173e5c0dd7302ee1b2f28ba3debda375098 languageName: node linkType: hard "ci-parallel-vars@npm:^1.0.1": version: 1.0.1 resolution: "ci-parallel-vars@npm:1.0.1" checksum: ae859831f7e8e3585db731b8306c336616e37bd709dad1d7775ea4c0731aefd94741dabb48201edc6827d000008fd7fb72cb977967614ee2d99d6b499f0c35fe languageName: node linkType: hard "cjs-module-lexer@npm:^1.0.0": version: 1.2.3 resolution: "cjs-module-lexer@npm:1.2.3" checksum: 5ea3cb867a9bb609b6d476cd86590d105f3cfd6514db38ff71f63992ab40939c2feb68967faa15a6d2b1f90daa6416b79ea2de486e9e2485a6f8b66a21b4fb0a languageName: node linkType: hard "clean-stack@npm:^2.0.0": version: 2.2.0 resolution: "clean-stack@npm:2.2.0" checksum: 2ac8cd2b2f5ec986a3c743935ec85b07bc174d5421a5efc8017e1f146a1cf5f781ae962618f416352103b32c9cd7e203276e8c28241bbe946160cab16149fb68 languageName: node linkType: hard "clean-stack@npm:^4.0.0": version: 4.2.0 resolution: "clean-stack@npm:4.2.0" dependencies: escape-string-regexp: 5.0.0 checksum: 373f656a31face5c615c0839213b9b542a0a48057abfb1df66900eab4dc2a5c6097628e4a0b5aa559cdfc4e66f8a14ea47be9681773165a44470ef5fb8ccc172 languageName: node 
linkType: hard "clean-yaml-object@npm:^0.1.0": version: 0.1.0 resolution: "clean-yaml-object@npm:0.1.0" checksum: 0374ad2f1fbd4984ecf56ebc62200092f6372b9ccf1b7971bb979c328fb12fe76e759fb1e8adc491c80b7b1861f9f00c7f19813dd2a0f49c88231422c70451f4 languageName: node linkType: hard "cli-cursor@npm:^3.1.0": version: 3.1.0 resolution: "cli-cursor@npm:3.1.0" dependencies: restore-cursor: ^3.1.0 checksum: 2692784c6cd2fd85cfdbd11f53aea73a463a6d64a77c3e098b2b4697a20443f430c220629e1ca3b195ea5ac4a97a74c2ee411f3807abf6df2b66211fec0c0a29 languageName: node linkType: hard "cli-cursor@npm:^4.0.0": version: 4.0.0 resolution: "cli-cursor@npm:4.0.0" dependencies: restore-cursor: ^4.0.0 checksum: ab3f3ea2076e2176a1da29f9d64f72ec3efad51c0960898b56c8a17671365c26e67b735920530eaf7328d61f8bd41c27f46b9cf6e4e10fe2fa44b5e8c0e392cc languageName: node linkType: hard "cli-truncate@npm:^3.1.0": version: 3.1.0 resolution: "cli-truncate@npm:3.1.0" dependencies: slice-ansi: ^5.0.0 string-width: ^5.0.0 checksum: c3243e41974445691c63f8b405df1d5a24049dc33d324fe448dc572e561a7b772ae982692900b1a5960901cc4fc7def25a629b9c69a4208ee89d12ab3332617a languageName: node linkType: hard "cliui@npm:^8.0.1": version: 8.0.1 resolution: "cliui@npm:8.0.1" dependencies: string-width: ^4.2.0 strip-ansi: ^6.0.1 wrap-ansi: ^7.0.0 checksum: 79648b3b0045f2e285b76fb2e24e207c6db44323581e421c3acbd0e86454cba1b37aea976ab50195a49e7384b871e6dfb2247ad7dec53c02454ac6497394cb56 languageName: node linkType: hard "co@npm:^4.6.0": version: 4.6.0 resolution: "co@npm:4.6.0" checksum: 5210d9223010eb95b29df06a91116f2cf7c8e0748a9013ed853b53f362ea0e822f1e5bb054fb3cefc645239a4cf966af1f6133a3b43f40d591f3b68ed6cf0510 languageName: node linkType: hard "code-excerpt@npm:^4.0.0": version: 4.0.0 resolution: "code-excerpt@npm:4.0.0" dependencies: convert-to-spaces: ^2.0.1 checksum: d57137d8f4825879283a828cc02a1115b56858dc54ed06c625c8f67d6685d1becd2fbaa7f0ab19ecca1f5cca03f8c97bbc1f013cab40261e4d3275032e65efe9 languageName: node linkType: hard "collect-v8-coverage@npm:^1.0.0": version: 1.0.2 resolution: "collect-v8-coverage@npm:1.0.2" checksum: c10f41c39ab84629d16f9f6137bc8a63d332244383fc368caf2d2052b5e04c20cd1fd70f66fcf4e2422b84c8226598b776d39d5f2d2a51867cc1ed5d1982b4da languageName: node linkType: hard "color-convert@npm:^1.9.0": version: 1.9.3 resolution: "color-convert@npm:1.9.3" dependencies: color-name: 1.1.3 checksum: fd7a64a17cde98fb923b1dd05c5f2e6f7aefda1b60d67e8d449f9328b4e53b228a428fd38bfeaeb2db2ff6b6503a776a996150b80cdf224062af08a5c8a3a203 languageName: node linkType: hard "color-convert@npm:^2.0.1": version: 2.0.1 resolution: "color-convert@npm:2.0.1" dependencies: color-name: ~1.1.4 checksum: 79e6bdb9fd479a205c71d89574fccfb22bd9053bd98c6c4d870d65c132e5e904e6034978e55b43d69fcaa7433af2016ee203ce76eeba9cfa554b373e7f7db336 languageName: node linkType: hard "color-name@npm:1.1.3": version: 1.1.3 resolution: "color-name@npm:1.1.3" checksum: 09c5d3e33d2105850153b14466501f2bfb30324a2f76568a408763a3b7433b0e50e5b4ab1947868e65cb101bb7cb75029553f2c333b6d4b8138a73fcc133d69d languageName: node linkType: hard "color-name@npm:~1.1.4": version: 1.1.4 resolution: "color-name@npm:1.1.4" checksum: b0445859521eb4021cd0fb0cc1a75cecf67fceecae89b63f62b201cca8d345baf8b952c966862a9d9a2632987d4f6581f0ec8d957dfacece86f0a7919316f610 languageName: node linkType: hard "color-support@npm:^1.1.3": version: 1.1.3 resolution: "color-support@npm:1.1.3" bin: color-support: bin.js checksum: 
9b7356817670b9a13a26ca5af1c21615463b500783b739b7634a0c2047c16cef4b2865d7576875c31c3cddf9dd621fa19285e628f20198b233a5cfdda6d0793b languageName: node linkType: hard "colorette@npm:^2.0.19, colorette@npm:^2.0.20": version: 2.0.20 resolution: "colorette@npm:2.0.20" checksum: 0c016fea2b91b733eb9f4bcdb580018f52c0bc0979443dad930e5037a968237ac53d9beb98e218d2e9235834f8eebce7f8e080422d6194e957454255bde71d3d languageName: node linkType: hard "commander@npm:11.0.0": version: 11.0.0 resolution: "commander@npm:11.0.0" checksum: 6621954e1e1d078b4991c1f5bbd9439ad37aa7768d6ab4842de1dbd4d222c8a27e1b8e62108b3a92988614af45031d5bb2a2aaa92951f4d0c934d1a1ac564bb4 languageName: node linkType: hard "commander@npm:^6.1.0": version: 6.2.1 resolution: "commander@npm:6.2.1" checksum: d7090410c0de6bc5c67d3ca41c41760d6d268f3c799e530aafb73b7437d1826bbf0d2a3edac33f8b57cc9887b4a986dce307fa5557e109be40eadb7c43b21742 languageName: node linkType: hard "common-path-prefix@npm:^3.0.0": version: 3.0.0 resolution: "common-path-prefix@npm:3.0.0" checksum: fdb3c4f54e51e70d417ccd950c07f757582de800c0678ca388aedefefc84982039f346f9fd9a1252d08d2da9e9ef4019f580a1d1d3a10da031e4bb3c924c5818 languageName: node linkType: hard "common-tags@npm:^1.8.0": version: 1.8.2 resolution: "common-tags@npm:1.8.2" checksum: 767a6255a84bbc47df49a60ab583053bb29a7d9687066a18500a516188a062c4e4cd52de341f22de0b07062e699b1b8fe3cfa1cb55b241cb9301aeb4f45b4dff languageName: node linkType: hard "concat-map@npm:0.0.1": version: 0.0.1 resolution: "concat-map@npm:0.0.1" checksum: 902a9f5d8967a3e2faf138d5cb784b9979bad2e6db5357c5b21c568df4ebe62bcb15108af1b2253744844eb964fc023fbd9afbbbb6ddd0bcc204c6fb5b7bf3af languageName: node linkType: hard "concordance@npm:^5.0.4": version: 5.0.4 resolution: "concordance@npm:5.0.4" dependencies: date-time: ^3.1.0 esutils: ^2.0.3 fast-diff: ^1.2.0 js-string-escape: ^1.0.1 lodash: ^4.17.15 md5-hex: ^3.0.1 semver: ^7.3.2 well-known-symbols: ^2.0.0 checksum: 749153ba711492feb7c3d2f5bb04c107157440b3e39509bd5dd19ee7b3ac751d1e4cd75796d9f702e0a713312dbc661421c68aa4a2c34d5f6d91f47e3a1c64a6 languageName: node linkType: hard "console-control-strings@npm:^1.1.0": version: 1.1.0 resolution: "console-control-strings@npm:1.1.0" checksum: 8755d76787f94e6cf79ce4666f0c5519906d7f5b02d4b884cf41e11dcd759ed69c57da0670afd9236d229a46e0f9cf519db0cd829c6dca820bb5a5c3def584ed languageName: node linkType: hard "convert-source-map@npm:^1.6.0, convert-source-map@npm:^1.7.0": version: 1.9.0 resolution: "convert-source-map@npm:1.9.0" checksum: dc55a1f28ddd0e9485ef13565f8f756b342f9a46c4ae18b843fe3c30c675d058d6a4823eff86d472f187b176f0adf51ea7b69ea38be34be4a63cbbf91b0593c8 languageName: node linkType: hard "convert-source-map@npm:^2.0.0": version: 2.0.0 resolution: "convert-source-map@npm:2.0.0" checksum: 63ae9933be5a2b8d4509daca5124e20c14d023c820258e484e32dc324d34c2754e71297c94a05784064ad27615037ef677e3f0c00469fb55f409d2bb21261035 languageName: node linkType: hard "convert-to-spaces@npm:^2.0.1": version: 2.0.1 resolution: "convert-to-spaces@npm:2.0.1" checksum: bbb324e5916fe9866f65c0ff5f9c1ea933764d0bdb09fccaf59542e40545ed483db6b2339c6d9eb56a11965a58f1a6038f3174f0e2fb7601343c7107ca5e2751 languageName: node linkType: hard "cross-spawn@npm:^6.0.5": version: 6.0.5 resolution: "cross-spawn@npm:6.0.5" dependencies: nice-try: ^1.0.4 path-key: ^2.0.1 semver: ^5.5.0 shebang-command: ^1.2.0 which: ^1.2.9 checksum: f893bb0d96cd3d5751d04e67145bdddf25f99449531a72e82dcbbd42796bbc8268c1076c6b3ea51d4d455839902804b94bc45dfb37ecbb32ea8e54a6741c3ab9 languageName: node linkType: hard 
"cross-spawn@npm:^7.0.0, cross-spawn@npm:^7.0.2, cross-spawn@npm:^7.0.3": version: 7.0.3 resolution: "cross-spawn@npm:7.0.3" dependencies: path-key: ^3.1.0 shebang-command: ^2.0.0 which: ^2.0.1 checksum: 671cc7c7288c3a8406f3c69a3ae2fc85555c04169e9d611def9a675635472614f1c0ed0ef80955d5b6d4e724f6ced67f0ad1bb006c2ea643488fcfef994d7f52 languageName: node linkType: hard "currently-unhandled@npm:^0.4.1": version: 0.4.1 resolution: "currently-unhandled@npm:0.4.1" dependencies: array-find-index: ^1.0.1 checksum: 1f59fe10b5339b54b1a1eee110022f663f3495cf7cf2f480686e89edc7fa8bfe42dbab4b54f85034bc8b092a76cc7becbc2dad4f9adad332ab5831bec39ad540 languageName: node linkType: hard "date-time@npm:^3.1.0": version: 3.1.0 resolution: "date-time@npm:3.1.0" dependencies: time-zone: ^1.0.0 checksum: f9cfcd1b15dfeabab15c0b9d18eb9e4e2d9d4371713564178d46a8f91ad577a290b5178b80050718d02d9c0cf646f8a875011e12d1ed05871e9f72c72c8a8fe6 languageName: node linkType: hard "debug@npm:4, debug@npm:4.3.4, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.2, debug@npm:^4.3.3, debug@npm:^4.3.4": version: 4.3.4 resolution: "debug@npm:4.3.4" dependencies: ms: 2.1.2 peerDependenciesMeta: supports-color: optional: true checksum: 3dbad3f94ea64f34431a9cbf0bafb61853eda57bff2880036153438f50fb5a84f27683ba0d8e5426bf41a8c6ff03879488120cf5b3a761e77953169c0600a708 languageName: node linkType: hard "debug@npm:^3.2.7": version: 3.2.7 resolution: "debug@npm:3.2.7" dependencies: ms: ^2.1.1 checksum: b3d8c5940799914d30314b7c3304a43305fd0715581a919dacb8b3176d024a782062368405b47491516d2091d6462d4d11f2f4974a405048094f8bfebfa3071c languageName: node linkType: hard "dedent@npm:^1.0.0": version: 1.5.1 resolution: "dedent@npm:1.5.1" peerDependencies: babel-plugin-macros: ^3.1.0 peerDependenciesMeta: babel-plugin-macros: optional: true checksum: c3c300a14edf1bdf5a873f9e4b22e839d62490bc5c8d6169c1f15858a1a76733d06a9a56930e963d677a2ceeca4b6b0894cc5ea2f501aa382ca5b92af3413c2a languageName: node linkType: hard "deep-is@npm:^0.1.3": version: 0.1.4 resolution: "deep-is@npm:0.1.4" checksum: edb65dd0d7d1b9c40b2f50219aef30e116cedd6fc79290e740972c132c09106d2e80aa0bc8826673dd5a00222d4179c84b36a790eef63a4c4bca75a37ef90804 languageName: node linkType: hard "deepmerge@npm:^4.2.2": version: 4.3.1 resolution: "deepmerge@npm:4.3.1" checksum: 2024c6a980a1b7128084170c4cf56b0fd58a63f2da1660dcfe977415f27b17dbe5888668b59d0b063753f3220719d5e400b7f113609489c90160bb9a5518d052 languageName: node linkType: hard "define-properties@npm:^1.1.3, define-properties@npm:^1.1.4, define-properties@npm:^1.2.0": version: 1.2.0 resolution: "define-properties@npm:1.2.0" dependencies: has-property-descriptors: ^1.0.0 object-keys: ^1.1.1 checksum: e60aee6a19b102df4e2b1f301816804e81ab48bb91f00d0d935f269bf4b3f79c88b39e4f89eaa132890d23267335fd1140dfcd8d5ccd61031a0a2c41a54e33a6 languageName: node linkType: hard "delegates@npm:^1.0.0": version: 1.0.0 resolution: "delegates@npm:1.0.0" checksum: a51744d9b53c164ba9c0492471a1a2ffa0b6727451bdc89e31627fdf4adda9d51277cfcbfb20f0a6f08ccb3c436f341df3e92631a3440226d93a8971724771fd languageName: node linkType: hard "detect-newline@npm:^3.0.0": version: 3.1.0 resolution: "detect-newline@npm:3.1.0" checksum: ae6cd429c41ad01b164c59ea36f264a2c479598e61cba7c99da24175a7ab80ddf066420f2bec9a1c57a6bead411b4655ff15ad7d281c000a89791f48cbe939e7 languageName: node linkType: hard "diff-sequences@npm:^29.6.3": version: 29.6.3 resolution: "diff-sequences@npm:29.6.3" checksum: 
f4914158e1f2276343d98ff5b31fc004e7304f5470bf0f1adb2ac6955d85a531a6458d33e87667f98f6ae52ebd3891bb47d420bb48a5bd8b7a27ee25b20e33aa languageName: node linkType: hard "dir-glob@npm:^3.0.1": version: 3.0.1 resolution: "dir-glob@npm:3.0.1" dependencies: path-type: ^4.0.0 checksum: fa05e18324510d7283f55862f3161c6759a3f2f8dbce491a2fc14c8324c498286c54282c1f0e933cb930da8419b30679389499b919122952a4f8592362ef4615 languageName: node linkType: hard "doctrine@npm:^2.1.0": version: 2.1.0 resolution: "doctrine@npm:2.1.0" dependencies: esutils: ^2.0.2 checksum: a45e277f7feaed309fe658ace1ff286c6e2002ac515af0aaf37145b8baa96e49899638c7cd47dccf84c3d32abfc113246625b3ac8f552d1046072adee13b0dc8 languageName: node linkType: hard "doctrine@npm:^3.0.0": version: 3.0.0 resolution: "doctrine@npm:3.0.0" dependencies: esutils: ^2.0.2 checksum: fd7673ca77fe26cd5cba38d816bc72d641f500f1f9b25b83e8ce28827fe2da7ad583a8da26ab6af85f834138cf8dae9f69b0cd6ab925f52ddab1754db44d99ce languageName: node linkType: hard "eastasianwidth@npm:^0.2.0": version: 0.2.0 resolution: "eastasianwidth@npm:0.2.0" checksum: 7d00d7cd8e49b9afa762a813faac332dee781932d6f2c848dc348939c4253f1d4564341b7af1d041853bc3f32c2ef141b58e0a4d9862c17a7f08f68df1e0f1ed languageName: node linkType: hard "electron-to-chromium@npm:^1.4.477": version: 1.4.501 resolution: "electron-to-chromium@npm:1.4.501" checksum: b573e0c4bdf1036a39071825cdfc0dfcf04a5cce46eab246db799d13ee5745089a72af2b18d81402ca2aac1dc0af468480470d9d677ba51eecaef3481bace2b3 languageName: node linkType: hard "emittery@npm:^0.13.1": version: 0.13.1 resolution: "emittery@npm:0.13.1" checksum: 2b089ab6306f38feaabf4f6f02792f9ec85fc054fda79f44f6790e61bbf6bc4e1616afb9b232e0c5ec5289a8a452f79bfa6d905a6fd64e94b49981f0934001c6 languageName: node linkType: hard "emittery@npm:^1.0.1": version: 1.0.1 resolution: "emittery@npm:1.0.1" checksum: d95faee6ffb2e023cadaa6804265fea5298c53d079f170112af8dfae3e141761363ea4510966128259346418e3ec7639310fd75059ecce2423bf8afd07004226 languageName: node linkType: hard "emoji-regex@npm:^8.0.0": version: 8.0.0 resolution: "emoji-regex@npm:8.0.0" checksum: d4c5c39d5a9868b5fa152f00cada8a936868fd3367f33f71be515ecee4c803132d11b31a6222b2571b1e5f7e13890156a94880345594d0ce7e3c9895f560f192 languageName: node linkType: hard "emoji-regex@npm:^9.2.2": version: 9.2.2 resolution: "emoji-regex@npm:9.2.2" checksum: 8487182da74aabd810ac6d6f1994111dfc0e331b01271ae01ec1eb0ad7b5ecc2bbbbd2f053c05cb55a1ac30449527d819bbfbf0e3de1023db308cbcb47f86601 languageName: node linkType: hard "encoding@npm:^0.1.13": version: 0.1.13 resolution: "encoding@npm:0.1.13" dependencies: iconv-lite: ^0.6.2 checksum: bb98632f8ffa823996e508ce6a58ffcf5856330fde839ae42c9e1f436cc3b5cc651d4aeae72222916545428e54fd0f6aa8862fd8d25bdbcc4589f1e3f3715e7f languageName: node linkType: hard "env-paths@npm:^2.2.0": version: 2.2.1 resolution: "env-paths@npm:2.2.1" checksum: 65b5df55a8bab92229ab2b40dad3b387fad24613263d103a97f91c9fe43ceb21965cd3392b1ccb5d77088021e525c4e0481adb309625d0cb94ade1d1fb8dc17e languageName: node linkType: hard "err-code@npm:^2.0.2": version: 2.0.3 resolution: "err-code@npm:2.0.3" checksum: 8b7b1be20d2de12d2255c0bc2ca638b7af5171142693299416e6a9339bd7d88fc8d7707d913d78e0993176005405a236b066b45666b27b797252c771156ace54 languageName: node linkType: hard "error-ex@npm:^1.3.1": version: 1.3.2 resolution: "error-ex@npm:1.3.2" dependencies: is-arrayish: ^0.2.1 checksum: c1c2b8b65f9c91b0f9d75f0debaa7ec5b35c266c2cac5de412c1a6de86d4cbae04ae44e510378cb14d032d0645a36925d0186f8bb7367bcc629db256b743a001 languageName: node linkType: 
hard "es-abstract@npm:^1.19.0, es-abstract@npm:^1.20.4, es-abstract@npm:^1.21.2": version: 1.22.1 resolution: "es-abstract@npm:1.22.1" dependencies: array-buffer-byte-length: ^1.0.0 arraybuffer.prototype.slice: ^1.0.1 available-typed-arrays: ^1.0.5 call-bind: ^1.0.2 es-set-tostringtag: ^2.0.1 es-to-primitive: ^1.2.1 function.prototype.name: ^1.1.5 get-intrinsic: ^1.2.1 get-symbol-description: ^1.0.0 globalthis: ^1.0.3 gopd: ^1.0.1 has: ^1.0.3 has-property-descriptors: ^1.0.0 has-proto: ^1.0.1 has-symbols: ^1.0.3 internal-slot: ^1.0.5 is-array-buffer: ^3.0.2 is-callable: ^1.2.7 is-negative-zero: ^2.0.2 is-regex: ^1.1.4 is-shared-array-buffer: ^1.0.2 is-string: ^1.0.7 is-typed-array: ^1.1.10 is-weakref: ^1.0.2 object-inspect: ^1.12.3 object-keys: ^1.1.1 object.assign: ^4.1.4 regexp.prototype.flags: ^1.5.0 safe-array-concat: ^1.0.0 safe-regex-test: ^1.0.0 string.prototype.trim: ^1.2.7 string.prototype.trimend: ^1.0.6 string.prototype.trimstart: ^1.0.6 typed-array-buffer: ^1.0.0 typed-array-byte-length: ^1.0.0 typed-array-byte-offset: ^1.0.0 typed-array-length: ^1.0.4 unbox-primitive: ^1.0.2 which-typed-array: ^1.1.10 checksum: 614e2c1c3717cb8d30b6128ef12ea110e06fd7d75ad77091ca1c5dbfb00da130e62e4bbbbbdda190eada098a22b27fe0f99ae5a1171dac2c8663b1e8be8a3a9b languageName: node linkType: hard "es-set-tostringtag@npm:^2.0.1": version: 2.0.1 resolution: "es-set-tostringtag@npm:2.0.1" dependencies: get-intrinsic: ^1.1.3 has: ^1.0.3 has-tostringtag: ^1.0.0 checksum: ec416a12948cefb4b2a5932e62093a7cf36ddc3efd58d6c58ca7ae7064475ace556434b869b0bbeb0c365f1032a8ccd577211101234b69837ad83ad204fff884 languageName: node linkType: hard "es-shim-unscopables@npm:^1.0.0": version: 1.0.0 resolution: "es-shim-unscopables@npm:1.0.0" dependencies: has: ^1.0.3 checksum: 83e95cadbb6ee44d3644dfad60dcad7929edbc42c85e66c3e99aefd68a3a5c5665f2686885cddb47dfeabfd77bd5ea5a7060f2092a955a729bbd8834f0d86fa1 languageName: node linkType: hard "es-to-primitive@npm:^1.2.1": version: 1.2.1 resolution: "es-to-primitive@npm:1.2.1" dependencies: is-callable: ^1.1.4 is-date-object: ^1.0.1 is-symbol: ^1.0.2 checksum: 4ead6671a2c1402619bdd77f3503991232ca15e17e46222b0a41a5d81aebc8740a77822f5b3c965008e631153e9ef0580540007744521e72de8e33599fca2eed languageName: node linkType: hard "escalade@npm:^3.1.1": version: 3.1.1 resolution: "escalade@npm:3.1.1" checksum: a3e2a99f07acb74b3ad4989c48ca0c3140f69f923e56d0cba0526240ee470b91010f9d39001f2a4a313841d237ede70a729e92125191ba5d21e74b106800b133 languageName: node linkType: hard "escape-string-regexp@npm:5.0.0, escape-string-regexp@npm:^5.0.0": version: 5.0.0 resolution: "escape-string-regexp@npm:5.0.0" checksum: 20daabe197f3cb198ec28546deebcf24b3dbb1a5a269184381b3116d12f0532e06007f4bc8da25669d6a7f8efb68db0758df4cd981f57bc5b57f521a3e12c59e languageName: node linkType: hard "escape-string-regexp@npm:^1.0.5": version: 1.0.5 resolution: "escape-string-regexp@npm:1.0.5" checksum: 6092fda75c63b110c706b6a9bfde8a612ad595b628f0bd2147eea1d3406723020810e591effc7db1da91d80a71a737a313567c5abb3813e8d9c71f4aa595b410 languageName: node linkType: hard "escape-string-regexp@npm:^2.0.0": version: 2.0.0 resolution: "escape-string-regexp@npm:2.0.0" checksum: 9f8a2d5743677c16e85c810e3024d54f0c8dea6424fad3c79ef6666e81dd0846f7437f5e729dfcdac8981bc9e5294c39b4580814d114076b8d36318f46ae4395 languageName: node linkType: hard "escape-string-regexp@npm:^4.0.0": version: 4.0.0 resolution: "escape-string-regexp@npm:4.0.0" checksum: 
98b48897d93060f2322108bf29db0feba7dd774be96cd069458d1453347b25ce8682ecc39859d4bca2203cc0ab19c237bcc71755eff49a0f8d90beadeeba5cc5 languageName: node linkType: hard "eslint-config-prettier@npm:^8.6.0": version: 8.10.0 resolution: "eslint-config-prettier@npm:8.10.0" peerDependencies: eslint: ">=7.0.0" bin: eslint-config-prettier: bin/cli.js checksum: 153266badd477e49b0759816246b2132f1dbdb6c7f313ca60a9af5822fd1071c2bc5684a3720d78b725452bbac04bb130878b2513aea5e72b1b792de5a69fec8 languageName: node linkType: hard "eslint-import-resolver-node@npm:^0.3.7": version: 0.3.9 resolution: "eslint-import-resolver-node@npm:0.3.9" dependencies: debug: ^3.2.7 is-core-module: ^2.13.0 resolve: ^1.22.4 checksum: 439b91271236b452d478d0522a44482e8c8540bf9df9bd744062ebb89ab45727a3acd03366a6ba2bdbcde8f9f718bab7fe8db64688aca75acf37e04eafd25e22 languageName: node linkType: hard "eslint-module-utils@npm:^2.8.0": version: 2.8.0 resolution: "eslint-module-utils@npm:2.8.0" dependencies: debug: ^3.2.7 peerDependenciesMeta: eslint: optional: true checksum: 74c6dfea7641ebcfe174be61168541a11a14aa8d72e515f5f09af55cd0d0862686104b0524aa4b8e0ce66418a44aa38a94d2588743db5fd07a6b49ffd16921d2 languageName: node linkType: hard "eslint-plugin-import@npm:^2.27.5": version: 2.28.1 resolution: "eslint-plugin-import@npm:2.28.1" dependencies: array-includes: ^3.1.6 array.prototype.findlastindex: ^1.2.2 array.prototype.flat: ^1.3.1 array.prototype.flatmap: ^1.3.1 debug: ^3.2.7 doctrine: ^2.1.0 eslint-import-resolver-node: ^0.3.7 eslint-module-utils: ^2.8.0 has: ^1.0.3 is-core-module: ^2.13.0 is-glob: ^4.0.3 minimatch: ^3.1.2 object.fromentries: ^2.0.6 object.groupby: ^1.0.0 object.values: ^1.1.6 semver: ^6.3.1 tsconfig-paths: ^3.14.2 peerDependencies: eslint: ^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 checksum: e8ae6dd8f06d8adf685f9c1cfd46ac9e053e344a05c4090767e83b63a85c8421ada389807a39e73c643b9bff156715c122e89778169110ed68d6428e12607edf languageName: node linkType: hard "eslint-plugin-prettier@npm:^4.2.1": version: 4.2.1 resolution: "eslint-plugin-prettier@npm:4.2.1" dependencies: prettier-linter-helpers: ^1.0.0 peerDependencies: eslint: ">=7.28.0" prettier: ">=2.0.0" peerDependenciesMeta: eslint-config-prettier: optional: true checksum: b9e839d2334ad8ec7a5589c5cb0f219bded260839a857d7a486997f9870e95106aa59b8756ff3f37202085ebab658de382b0267cae44c3a7f0eb0bcc03a4f6d6 languageName: node linkType: hard "eslint-scope@npm:^5.1.1": version: 5.1.1 resolution: "eslint-scope@npm:5.1.1" dependencies: esrecurse: ^4.3.0 estraverse: ^4.1.1 checksum: 47e4b6a3f0cc29c7feedee6c67b225a2da7e155802c6ea13bbef4ac6b9e10c66cd2dcb987867ef176292bf4e64eccc680a49e35e9e9c669f4a02bac17e86abdb languageName: node linkType: hard "eslint-scope@npm:^7.2.2": version: 7.2.2 resolution: "eslint-scope@npm:7.2.2" dependencies: esrecurse: ^4.3.0 estraverse: ^5.2.0 checksum: ec97dbf5fb04b94e8f4c5a91a7f0a6dd3c55e46bfc7bbcd0e3138c3a76977570e02ed89a1810c778dcd72072ff0e9621ba1379b4babe53921d71e2e4486fda3e languageName: node linkType: hard "eslint-visitor-keys@npm:^3.3.0, eslint-visitor-keys@npm:^3.4.1, eslint-visitor-keys@npm:^3.4.3": version: 3.4.3 resolution: "eslint-visitor-keys@npm:3.4.3" checksum: 36e9ef87fca698b6fd7ca5ca35d7b2b6eeaaf106572e2f7fd31c12d3bfdaccdb587bba6d3621067e5aece31c8c3a348b93922ab8f7b2cbc6aaab5e1d89040c60 languageName: node linkType: hard "eslint@npm:^8.33.0": version: 8.47.0 resolution: "eslint@npm:8.47.0" dependencies: "@eslint-community/eslint-utils": ^4.2.0 "@eslint-community/regexpp": ^4.6.1 "@eslint/eslintrc": ^2.1.2 "@eslint/js": ^8.47.0 
"@humanwhocodes/config-array": ^0.11.10 "@humanwhocodes/module-importer": ^1.0.1 "@nodelib/fs.walk": ^1.2.8 ajv: ^6.12.4 chalk: ^4.0.0 cross-spawn: ^7.0.2 debug: ^4.3.2 doctrine: ^3.0.0 escape-string-regexp: ^4.0.0 eslint-scope: ^7.2.2 eslint-visitor-keys: ^3.4.3 espree: ^9.6.1 esquery: ^1.4.2 esutils: ^2.0.2 fast-deep-equal: ^3.1.3 file-entry-cache: ^6.0.1 find-up: ^5.0.0 glob-parent: ^6.0.2 globals: ^13.19.0 graphemer: ^1.4.0 ignore: ^5.2.0 imurmurhash: ^0.1.4 is-glob: ^4.0.0 is-path-inside: ^3.0.3 js-yaml: ^4.1.0 json-stable-stringify-without-jsonify: ^1.0.1 levn: ^0.4.1 lodash.merge: ^4.6.2 minimatch: ^3.1.2 natural-compare: ^1.4.0 optionator: ^0.9.3 strip-ansi: ^6.0.1 text-table: ^0.2.0 bin: eslint: bin/eslint.js checksum: 1988617f703eadc5c7540468d54dc8e5171cf2bb9483f6172799cd1ff54a9a5e4470f003784e8cef92687eaa14de37172732787040e67817581a20bcb9c15970 languageName: node linkType: hard "espree@npm:^9.6.0, espree@npm:^9.6.1": version: 9.6.1 resolution: "espree@npm:9.6.1" dependencies: acorn: ^8.9.0 acorn-jsx: ^5.3.2 eslint-visitor-keys: ^3.4.1 checksum: eb8c149c7a2a77b3f33a5af80c10875c3abd65450f60b8af6db1bfcfa8f101e21c1e56a561c6dc13b848e18148d43469e7cd208506238554fb5395a9ea5a1ab9 languageName: node linkType: hard "esprima@npm:^4.0.0": version: 4.0.1 resolution: "esprima@npm:4.0.1" bin: esparse: ./bin/esparse.js esvalidate: ./bin/esvalidate.js checksum: b45bc805a613dbea2835278c306b91aff6173c8d034223fa81498c77dcbce3b2931bf6006db816f62eacd9fd4ea975dfd85a5b7f3c6402cfd050d4ca3c13a628 languageName: node linkType: hard "esquery@npm:^1.4.2": version: 1.5.0 resolution: "esquery@npm:1.5.0" dependencies: estraverse: ^5.1.0 checksum: aefb0d2596c230118656cd4ec7532d447333a410a48834d80ea648b1e7b5c9bc9ed8b5e33a89cb04e487b60d622f44cf5713bf4abed7c97343edefdc84a35900 languageName: node linkType: hard "esrecurse@npm:^4.3.0": version: 4.3.0 resolution: "esrecurse@npm:4.3.0" dependencies: estraverse: ^5.2.0 checksum: ebc17b1a33c51cef46fdc28b958994b1dc43cd2e86237515cbc3b4e5d2be6a811b2315d0a1a4d9d340b6d2308b15322f5c8291059521cc5f4802f65e7ec32837 languageName: node linkType: hard "estraverse@npm:^4.1.1": version: 4.3.0 resolution: "estraverse@npm:4.3.0" checksum: a6299491f9940bb246124a8d44b7b7a413a8336f5436f9837aaa9330209bd9ee8af7e91a654a3545aee9c54b3308e78ee360cef1d777d37cfef77d2fa33b5827 languageName: node linkType: hard "estraverse@npm:^5.1.0, estraverse@npm:^5.2.0": version: 5.3.0 resolution: "estraverse@npm:5.3.0" checksum: 072780882dc8416ad144f8fe199628d2b3e7bbc9989d9ed43795d2c90309a2047e6bc5979d7e2322a341163d22cfad9e21f4110597fe487519697389497e4e2b languageName: node linkType: hard "esutils@npm:^2.0.2, esutils@npm:^2.0.3": version: 2.0.3 resolution: "esutils@npm:2.0.3" checksum: 22b5b08f74737379a840b8ed2036a5fb35826c709ab000683b092d9054e5c2a82c27818f12604bfc2a9a76b90b6834ef081edbc1c7ae30d1627012e067c6ec87 languageName: node linkType: hard "eventemitter3@npm:^5.0.1": version: 5.0.1 resolution: "eventemitter3@npm:5.0.1" checksum: 543d6c858ab699303c3c32e0f0f47fc64d360bf73c3daf0ac0b5079710e340d6fe9f15487f94e66c629f5f82cd1a8678d692f3dbb6f6fcd1190e1b97fcad36f8 languageName: node linkType: hard "execa@npm:7.2.0": version: 7.2.0 resolution: "execa@npm:7.2.0" dependencies: cross-spawn: ^7.0.3 get-stream: ^6.0.1 human-signals: ^4.3.0 is-stream: ^3.0.0 merge-stream: ^2.0.0 npm-run-path: ^5.1.0 onetime: ^6.0.0 signal-exit: ^3.0.7 strip-final-newline: ^3.0.0 checksum: 14fd17ba0ca8c87b277584d93b1d9fc24f2a65e5152b31d5eb159a3b814854283eaae5f51efa9525e304447e2f757c691877f7adff8fde5746aae67eb1edd1cc languageName: node 
linkType: hard "execa@npm:^5.0.0": version: 5.1.1 resolution: "execa@npm:5.1.1" dependencies: cross-spawn: ^7.0.3 get-stream: ^6.0.0 human-signals: ^2.1.0 is-stream: ^2.0.0 merge-stream: ^2.0.0 npm-run-path: ^4.0.1 onetime: ^5.1.2 signal-exit: ^3.0.3 strip-final-newline: ^2.0.0 checksum: fba9022c8c8c15ed862847e94c252b3d946036d7547af310e344a527e59021fd8b6bb0723883ea87044dc4f0201f949046993124a42ccb0855cae5bf8c786343 languageName: node linkType: hard "exit@npm:^0.1.2": version: 0.1.2 resolution: "exit@npm:0.1.2" checksum: abc407f07a875c3961e4781dfcb743b58d6c93de9ab263f4f8c9d23bb6da5f9b7764fc773f86b43dd88030444d5ab8abcb611cb680fba8ca075362b77114bba3 languageName: node linkType: hard "expect@npm:^29.0.0, expect@npm:^29.6.4": version: 29.6.4 resolution: "expect@npm:29.6.4" dependencies: "@jest/expect-utils": ^29.6.4 jest-get-type: ^29.6.3 jest-matcher-utils: ^29.6.4 jest-message-util: ^29.6.3 jest-util: ^29.6.3 checksum: 019b187d665562e4948b239e011a8791363e916f3076a229298d625e67fdadb06e8c2748798c49b4cf418ea223673eadd1de06537e08ba3c055c6f0efefc2306 languageName: node linkType: hard "exponential-backoff@npm:^3.1.1": version: 3.1.1 resolution: "exponential-backoff@npm:3.1.1" checksum: 3d21519a4f8207c99f7457287291316306255a328770d320b401114ec8481986e4e467e854cb9914dd965e0a1ca810a23ccb559c642c88f4c7f55c55778a9b48 languageName: node linkType: hard "fast-deep-equal@npm:^3.1.1, fast-deep-equal@npm:^3.1.3": version: 3.1.3 resolution: "fast-deep-equal@npm:3.1.3" checksum: e21a9d8d84f53493b6aa15efc9cfd53dd5b714a1f23f67fb5dc8f574af80df889b3bce25dc081887c6d25457cce704e636395333abad896ccdec03abaf1f3f9d languageName: node linkType: hard "fast-diff@npm:^1.1.2, fast-diff@npm:^1.2.0": version: 1.2.0 resolution: "fast-diff@npm:1.2.0" checksum: 1b5306eaa9e826564d9e5ffcd6ebd881eb5f770b3f977fcbf38f05c824e42172b53c79920e8429c54eb742ce15a0caf268b0fdd5b38f6de52234c4a8368131ae languageName: node linkType: hard "fast-glob@npm:^3.2.9, fast-glob@npm:^3.3.0": version: 3.3.1 resolution: "fast-glob@npm:3.3.1" dependencies: "@nodelib/fs.stat": ^2.0.2 "@nodelib/fs.walk": ^1.2.3 glob-parent: ^5.1.2 merge2: ^1.3.0 micromatch: ^4.0.4 checksum: b6f3add6403e02cf3a798bfbb1183d0f6da2afd368f27456010c0bc1f9640aea308243d4cb2c0ab142f618276e65ecb8be1661d7c62a7b4e5ba774b9ce5432e5 languageName: node linkType: hard "fast-json-stable-stringify@npm:2.x, fast-json-stable-stringify@npm:^2.0.0, fast-json-stable-stringify@npm:^2.1.0": version: 2.1.0 resolution: "fast-json-stable-stringify@npm:2.1.0" checksum: b191531e36c607977e5b1c47811158733c34ccb3bfde92c44798929e9b4154884378536d26ad90dfecd32e1ffc09c545d23535ad91b3161a27ddbb8ebe0cbecb languageName: node linkType: hard "fast-levenshtein@npm:^2.0.6": version: 2.0.6 resolution: "fast-levenshtein@npm:2.0.6" checksum: 92cfec0a8dfafd9c7a15fba8f2cc29cd0b62b85f056d99ce448bbcd9f708e18ab2764bda4dd5158364f4145a7c72788538994f0d1787b956ef0d1062b0f7c24c languageName: node linkType: hard "fastq@npm:^1.6.0": version: 1.15.0 resolution: "fastq@npm:1.15.0" dependencies: reusify: ^1.0.4 checksum: 0170e6bfcd5d57a70412440b8ef600da6de3b2a6c5966aeaf0a852d542daff506a0ee92d6de7679d1de82e644bce69d7a574a6c93f0b03964b5337eed75ada1a languageName: node linkType: hard "fb-watchman@npm:^2.0.0": version: 2.0.2 resolution: "fb-watchman@npm:2.0.2" dependencies: bser: 2.1.1 checksum: b15a124cef28916fe07b400eb87cbc73ca082c142abf7ca8e8de6af43eca79ca7bd13eb4d4d48240b3bd3136eaac40d16e42d6edf87a8e5d1dd8070626860c78 languageName: node linkType: hard "figures@npm:^5.0.0": version: 5.0.0 resolution: "figures@npm:5.0.0" dependencies: 
escape-string-regexp: ^5.0.0 is-unicode-supported: ^1.2.0 checksum: e6e8b6d1df2f554d4effae4a5ceff5d796f9449f6d4e912d74dab7d5f25916ecda6c305b9084833157d56485a0c78b37164430ddc5675bcee1330e346710669e languageName: node linkType: hard "file-entry-cache@npm:^6.0.1": version: 6.0.1 resolution: "file-entry-cache@npm:6.0.1" dependencies: flat-cache: ^3.0.4 checksum: f49701feaa6314c8127c3c2f6173cfefff17612f5ed2daaafc6da13b5c91fd43e3b2a58fd0d63f9f94478a501b167615931e7200e31485e320f74a33885a9c74 languageName: node linkType: hard "fill-range@npm:^7.0.1": version: 7.0.1 resolution: "fill-range@npm:7.0.1" dependencies: to-regex-range: ^5.0.1 checksum: cc283f4e65b504259e64fd969bcf4def4eb08d85565e906b7d36516e87819db52029a76b6363d0f02d0d532f0033c9603b9e2d943d56ee3b0d4f7ad3328ff917 languageName: node linkType: hard "find-up@npm:^4.0.0, find-up@npm:^4.1.0": version: 4.1.0 resolution: "find-up@npm:4.1.0" dependencies: locate-path: ^5.0.0 path-exists: ^4.0.0 checksum: 4c172680e8f8c1f78839486e14a43ef82e9decd0e74145f40707cc42e7420506d5ec92d9a11c22bd2c48fb0c384ea05dd30e10dd152fefeec6f2f75282a8b844 languageName: node linkType: hard "find-up@npm:^5.0.0": version: 5.0.0 resolution: "find-up@npm:5.0.0" dependencies: locate-path: ^6.0.0 path-exists: ^4.0.0 checksum: 07955e357348f34660bde7920783204ff5a26ac2cafcaa28bace494027158a97b9f56faaf2d89a6106211a8174db650dd9f503f9c0d526b1202d5554a00b9095 languageName: node linkType: hard "find-up@npm:^6.0.0": version: 6.3.0 resolution: "find-up@npm:6.3.0" dependencies: locate-path: ^7.1.0 path-exists: ^5.0.0 checksum: 9a21b7f9244a420e54c6df95b4f6fc3941efd3c3e5476f8274eb452f6a85706e7a6a90de71353ee4f091fcb4593271a6f92810a324ec542650398f928783c280 languageName: node linkType: hard "flat-cache@npm:^3.0.4": version: 3.0.4 resolution: "flat-cache@npm:3.0.4" dependencies: flatted: ^3.1.0 rimraf: ^3.0.2 checksum: 4fdd10ecbcbf7d520f9040dd1340eb5dfe951e6f0ecf2252edeec03ee68d989ec8b9a20f4434270e71bcfd57800dc09b3344fca3966b2eb8f613072c7d9a2365 languageName: node linkType: hard "flatted@npm:^3.1.0": version: 3.2.2 resolution: "flatted@npm:3.2.2" checksum: 9d5e03fd9309b9103f345cf6d0cef4fa46201baa053b0ca3d57fa489449b0bee687b7355407898f630afbb1a1286d2a6658e7e77dea3b85c3cd6c6ce2894a5c3 languageName: node linkType: hard "for-each@npm:^0.3.3": version: 0.3.3 resolution: "for-each@npm:0.3.3" dependencies: is-callable: ^1.1.3 checksum: 6c48ff2bc63362319c65e2edca4a8e1e3483a2fabc72fbe7feaf8c73db94fc7861bd53bc02c8a66a0c1dd709da6b04eec42e0abdd6b40ce47305ae92a25e5d28 languageName: node linkType: hard "foreground-child@npm:^3.1.0": version: 3.1.1 resolution: "foreground-child@npm:3.1.1" dependencies: cross-spawn: ^7.0.0 signal-exit: ^4.0.1 checksum: 139d270bc82dc9e6f8bc045fe2aae4001dc2472157044fdfad376d0a3457f77857fa883c1c8b21b491c6caade9a926a4bed3d3d2e8d3c9202b151a4cbbd0bcd5 languageName: node linkType: hard "fs-extra@npm:^10.0.0": version: 10.1.0 resolution: "fs-extra@npm:10.1.0" dependencies: graceful-fs: ^4.2.0 jsonfile: ^6.0.1 universalify: ^2.0.0 checksum: dc94ab37096f813cc3ca12f0f1b5ad6744dfed9ed21e953d72530d103cea193c2f81584a39e9dee1bea36de5ee66805678c0dddc048e8af1427ac19c00fffc50 languageName: node linkType: hard "fs-minipass@npm:^2.0.0": version: 2.1.0 resolution: "fs-minipass@npm:2.1.0" dependencies: minipass: ^3.0.0 checksum: 1b8d128dae2ac6cc94230cc5ead341ba3e0efaef82dab46a33d171c044caaa6ca001364178d42069b2809c35a1c3c35079a32107c770e9ffab3901b59af8c8b1 languageName: node linkType: hard "fs-minipass@npm:^3.0.0": version: 3.0.3 resolution: "fs-minipass@npm:3.0.3" dependencies: minipass: ^7.0.3 
checksum: 8722a41109130851d979222d3ec88aabaceeaaf8f57b2a8f744ef8bd2d1ce95453b04a61daa0078822bc5cd21e008814f06fe6586f56fef511e71b8d2394d802 languageName: node linkType: hard "fs.realpath@npm:^1.0.0": version: 1.0.0 resolution: "fs.realpath@npm:1.0.0" checksum: 99ddea01a7e75aa276c250a04eedeffe5662bce66c65c07164ad6264f9de18fb21be9433ead460e54cff20e31721c811f4fb5d70591799df5f85dce6d6746fd0 languageName: node linkType: hard "fsevents@npm:^2.3.2, fsevents@npm:~2.3.2": version: 2.3.3 resolution: "fsevents@npm:2.3.3" dependencies: node-gyp: latest checksum: 11e6ea6fea15e42461fc55b4b0e4a0a3c654faa567f1877dbd353f39156f69def97a69936d1746619d656c4b93de2238bf731f6085a03a50cabf287c9d024317 conditions: os=darwin languageName: node linkType: hard "fsevents@patch:fsevents@^2.3.2#~builtin<compat/fsevents>, fsevents@patch:fsevents@~2.3.2#~builtin<compat/fsevents>": version: 2.3.3 resolution: "fsevents@patch:fsevents@npm%3A2.3.3#~builtin<compat/fsevents>::version=2.3.3&hash=df0bf1" dependencies: node-gyp: latest conditions: os=darwin languageName: node linkType: hard "function-bind@npm:^1.1.1": version: 1.1.1 resolution: "function-bind@npm:1.1.1" checksum: b32fbaebb3f8ec4969f033073b43f5c8befbb58f1a79e12f1d7490358150359ebd92f49e72ff0144f65f2c48ea2a605bff2d07965f548f6474fd8efd95bf361a languageName: node linkType: hard "function.prototype.name@npm:^1.1.5": version: 1.1.5 resolution: "function.prototype.name@npm:1.1.5" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.3 es-abstract: ^1.19.0 functions-have-names: ^1.2.2 checksum: acd21d733a9b649c2c442f067567743214af5fa248dbeee69d8278ce7df3329ea5abac572be9f7470b4ec1cd4d8f1040e3c5caccf98ebf2bf861a0deab735c27 languageName: node linkType: hard "functions-have-names@npm:^1.2.2, functions-have-names@npm:^1.2.3": version: 1.2.3 resolution: "functions-have-names@npm:1.2.3" checksum: c3f1f5ba20f4e962efb71344ce0a40722163e85bee2101ce25f88214e78182d2d2476aa85ef37950c579eb6cf6ee811c17b3101bb84004bb75655f3e33f3fdb5 languageName: node linkType: hard "gauge@npm:^4.0.3": version: 4.0.4 resolution: "gauge@npm:4.0.4" dependencies: aproba: ^1.0.3 || ^2.0.0 color-support: ^1.1.3 console-control-strings: ^1.1.0 has-unicode: ^2.0.1 signal-exit: ^3.0.7 string-width: ^4.2.3 strip-ansi: ^6.0.1 wide-align: ^1.1.5 checksum: 788b6bfe52f1dd8e263cda800c26ac0ca2ff6de0b6eee2fe0d9e3abf15e149b651bd27bf5226be10e6e3edb5c4e5d5985a5a1a98137e7a892f75eff76467ad2d languageName: node linkType: hard "gensync@npm:^1.0.0-beta.2": version: 1.0.0-beta.2 resolution: "gensync@npm:1.0.0-beta.2" checksum: a7437e58c6be12aa6c90f7730eac7fa9833dc78872b4ad2963d2031b00a3367a93f98aec75f9aaac7220848e4026d67a8655e870b24f20a543d103c0d65952ec languageName: node linkType: hard "get-caller-file@npm:^2.0.5": version: 2.0.5 resolution: "get-caller-file@npm:2.0.5" checksum: b9769a836d2a98c3ee734a88ba712e62703f1df31b94b784762c433c27a386dd6029ff55c2a920c392e33657d80191edbf18c61487e198844844516f843496b9 languageName: node linkType: hard "get-intrinsic@npm:^1.0.2, get-intrinsic@npm:^1.1.1, get-intrinsic@npm:^1.1.3, get-intrinsic@npm:^1.2.0, get-intrinsic@npm:^1.2.1": version: 1.2.1 resolution: "get-intrinsic@npm:1.2.1" dependencies: function-bind: ^1.1.1 has: ^1.0.3 has-proto: ^1.0.1 has-symbols: ^1.0.3 checksum: 5b61d88552c24b0cf6fa2d1b3bc5459d7306f699de060d76442cce49a4721f52b8c560a33ab392cf5575b7810277d54ded9d4d39a1ea61855619ebc005aa7e5f languageName: node linkType: hard "get-package-type@npm:^0.1.0": version: 0.1.0 resolution: "get-package-type@npm:0.1.0" checksum: 
bba0811116d11e56d702682ddef7c73ba3481f114590e705fc549f4d868972263896af313c57a25c076e3c0d567e11d919a64ba1b30c879be985fc9d44f96148 languageName: node linkType: hard "get-stream@npm:^6.0.0, get-stream@npm:^6.0.1": version: 6.0.1 resolution: "get-stream@npm:6.0.1" checksum: e04ecece32c92eebf5b8c940f51468cd53554dcbb0ea725b2748be583c9523d00128137966afce410b9b051eb2ef16d657cd2b120ca8edafcf5a65e81af63cad languageName: node linkType: hard "get-symbol-description@npm:^1.0.0": version: 1.0.0 resolution: "get-symbol-description@npm:1.0.0" dependencies: call-bind: ^1.0.2 get-intrinsic: ^1.1.1 checksum: 9ceff8fe968f9270a37a1f73bf3f1f7bda69ca80f4f80850670e0e7b9444ff99323f7ac52f96567f8b5f5fbe7ac717a0d81d3407c7313e82810c6199446a5247 languageName: node linkType: hard "glob-parent@npm:^5.1.2, glob-parent@npm:~5.1.2": version: 5.1.2 resolution: "glob-parent@npm:5.1.2" dependencies: is-glob: ^4.0.1 checksum: f4f2bfe2425296e8a47e36864e4f42be38a996db40420fe434565e4480e3322f18eb37589617a98640c5dc8fdec1a387007ee18dbb1f3f5553409c34d17f425e languageName: node linkType: hard "glob-parent@npm:^6.0.2": version: 6.0.2 resolution: "glob-parent@npm:6.0.2" dependencies: is-glob: ^4.0.3 checksum: c13ee97978bef4f55106b71e66428eb1512e71a7466ba49025fc2aec59a5bfb0954d5abd58fc5ee6c9b076eef4e1f6d3375c2e964b88466ca390da4419a786a8 languageName: node linkType: hard "glob@npm:^10.2.2": version: 10.3.3 resolution: "glob@npm:10.3.3" dependencies: foreground-child: ^3.1.0 jackspeak: ^2.0.3 minimatch: ^9.0.1 minipass: ^5.0.0 || ^6.0.2 || ^7.0.0 path-scurry: ^1.10.1 bin: glob: dist/cjs/src/bin.js checksum: 29190d3291f422da0cb40b77a72fc8d2c51a36524e99b8bf412548b7676a6627489528b57250429612b6eec2e6fe7826d328451d3e694a9d15e575389308ec53 languageName: node linkType: hard "glob@npm:^7.1.3, glob@npm:^7.1.4": version: 7.1.6 resolution: "glob@npm:7.1.6" dependencies: fs.realpath: ^1.0.0 inflight: ^1.0.4 inherits: 2 minimatch: ^3.0.4 once: ^1.3.0 path-is-absolute: ^1.0.0 checksum: 351d549dd90553b87c2d3f90ce11aed9e1093c74130440e7ae0592e11bbcd2ce7f0ebb8ba6bfe63aaf9b62166a7f4c80cb84490ae5d78408bb2572bf7d4ee0a6 languageName: node linkType: hard "globals@npm:^11.1.0": version: 11.12.0 resolution: "globals@npm:11.12.0" checksum: 67051a45eca3db904aee189dfc7cd53c20c7d881679c93f6146ddd4c9f4ab2268e68a919df740d39c71f4445d2b38ee360fc234428baea1dbdfe68bbcb46979e languageName: node linkType: hard "globals@npm:^13.19.0": version: 13.21.0 resolution: "globals@npm:13.21.0" dependencies: type-fest: ^0.20.2 checksum: 86c92ca8a04efd864c10852cd9abb1ebe6d447dcc72936783e66eaba1087d7dba5c9c3421a48d6ca722c319378754dbcc3f3f732dbe47592d7de908edf58a773 languageName: node linkType: hard "globalthis@npm:^1.0.3": version: 1.0.3 resolution: "globalthis@npm:1.0.3" dependencies: define-properties: ^1.1.3 checksum: fbd7d760dc464c886d0196166d92e5ffb4c84d0730846d6621a39fbbc068aeeb9c8d1421ad330e94b7bca4bb4ea092f5f21f3d36077812af5d098b4dc006c998 languageName: node linkType: hard "globby@npm:^11.1.0": version: 11.1.0 resolution: "globby@npm:11.1.0" dependencies: array-union: ^2.1.0 dir-glob: ^3.0.1 fast-glob: ^3.2.9 ignore: ^5.2.0 merge2: ^1.4.1 slash: ^3.0.0 checksum: b4be8885e0cfa018fc783792942d53926c35c50b3aefd3fdcfb9d22c627639dc26bd2327a40a0b74b074100ce95bb7187bfeae2f236856aa3de183af7a02aea6 languageName: node linkType: hard "globby@npm:^13.1.4": version: 13.2.2 resolution: "globby@npm:13.2.2" dependencies: dir-glob: ^3.0.1 fast-glob: ^3.3.0 ignore: ^5.2.4 merge2: ^1.4.1 slash: ^4.0.0 checksum: 
f3d84ced58a901b4fcc29c846983108c426631fe47e94872868b65565495f7bee7b3defd68923bd480582771fd4bbe819217803a164a618ad76f1d22f666f41e languageName: node linkType: hard "gopd@npm:^1.0.1": version: 1.0.1 resolution: "gopd@npm:1.0.1" dependencies: get-intrinsic: ^1.1.3 checksum: a5ccfb8806e0917a94e0b3de2af2ea4979c1da920bc381667c260e00e7cafdbe844e2cb9c5bcfef4e5412e8bf73bab837285bc35c7ba73aaaf0134d4583393a6 languageName: node linkType: hard "graceful-fs@npm:^4.1.2, graceful-fs@npm:^4.1.6, graceful-fs@npm:^4.2.0, graceful-fs@npm:^4.2.6, graceful-fs@npm:^4.2.9": version: 4.2.11 resolution: "graceful-fs@npm:4.2.11" checksum: ac85f94da92d8eb6b7f5a8b20ce65e43d66761c55ce85ac96df6865308390da45a8d3f0296dd3a663de65d30ba497bd46c696cc1e248c72b13d6d567138a4fc7 languageName: node linkType: hard "graphemer@npm:^1.4.0": version: 1.4.0 resolution: "graphemer@npm:1.4.0" checksum: bab8f0be9b568857c7bec9fda95a89f87b783546d02951c40c33f84d05bb7da3fd10f863a9beb901463669b6583173a8c8cc6d6b306ea2b9b9d5d3d943c3a673 languageName: node linkType: hard "has-bigints@npm:^1.0.1, has-bigints@npm:^1.0.2": version: 1.0.2 resolution: "has-bigints@npm:1.0.2" checksum: 390e31e7be7e5c6fe68b81babb73dfc35d413604d7ee5f56da101417027a4b4ce6a27e46eff97ad040c835b5d228676eae99a9b5c3bc0e23c8e81a49241ff45b languageName: node linkType: hard "has-flag@npm:^3.0.0": version: 3.0.0 resolution: "has-flag@npm:3.0.0" checksum: 4a15638b454bf086c8148979aae044dd6e39d63904cd452d970374fa6a87623423da485dfb814e7be882e05c096a7ccf1ebd48e7e7501d0208d8384ff4dea73b languageName: node linkType: hard "has-flag@npm:^4.0.0": version: 4.0.0 resolution: "has-flag@npm:4.0.0" checksum: 261a1357037ead75e338156b1f9452c016a37dcd3283a972a30d9e4a87441ba372c8b81f818cd0fbcd9c0354b4ae7e18b9e1afa1971164aef6d18c2b6095a8ad languageName: node linkType: hard "has-property-descriptors@npm:^1.0.0": version: 1.0.0 resolution: "has-property-descriptors@npm:1.0.0" dependencies: get-intrinsic: ^1.1.1 checksum: a6d3f0a266d0294d972e354782e872e2fe1b6495b321e6ef678c9b7a06a40408a6891817350c62e752adced73a94ac903c54734fee05bf65b1905ee1368194bb languageName: node linkType: hard "has-proto@npm:^1.0.1": version: 1.0.1 resolution: "has-proto@npm:1.0.1" checksum: febc5b5b531de8022806ad7407935e2135f1cc9e64636c3916c6842bd7995994ca3b29871ecd7954bd35f9e2986c17b3b227880484d22259e2f8e6ce63fd383e languageName: node linkType: hard "has-symbols@npm:^1.0.2, has-symbols@npm:^1.0.3": version: 1.0.3 resolution: "has-symbols@npm:1.0.3" checksum: a054c40c631c0d5741a8285010a0777ea0c068f99ed43e5d6eb12972da223f8af553a455132fdb0801bdcfa0e0f443c0c03a68d8555aa529b3144b446c3f2410 languageName: node linkType: hard "has-tostringtag@npm:^1.0.0": version: 1.0.0 resolution: "has-tostringtag@npm:1.0.0" dependencies: has-symbols: ^1.0.2 checksum: cc12eb28cb6ae22369ebaad3a8ab0799ed61270991be88f208d508076a1e99abe4198c965935ce85ea90b60c94ddda73693b0920b58e7ead048b4a391b502c1c languageName: node linkType: hard "has-unicode@npm:^2.0.1": version: 2.0.1 resolution: "has-unicode@npm:2.0.1" checksum: 1eab07a7436512db0be40a710b29b5dc21fa04880b7f63c9980b706683127e3c1b57cb80ea96d47991bdae2dfe479604f6a1ba410106ee1046a41d1bd0814400 languageName: node linkType: hard "has@npm:^1.0.3": version: 1.0.3 resolution: "has@npm:1.0.3" dependencies: function-bind: ^1.1.1 checksum: b9ad53d53be4af90ce5d1c38331e712522417d017d5ef1ebd0507e07c2fbad8686fffb8e12ddecd4c39ca9b9b47431afbb975b8abf7f3c3b82c98e9aad052792 languageName: node linkType: hard "hosted-git-info@npm:^2.1.4": version: 2.8.9 resolution: "hosted-git-info@npm:2.8.9" checksum: 
c955394bdab888a1e9bb10eb33029e0f7ce5a2ac7b3f158099dc8c486c99e73809dca609f5694b223920ca2174db33d32b12f9a2a47141dc59607c29da5a62dd languageName: node linkType: hard "html-escaper@npm:^2.0.0": version: 2.0.2 resolution: "html-escaper@npm:2.0.2" checksum: d2df2da3ad40ca9ee3a39c5cc6475ef67c8f83c234475f24d8e9ce0dc80a2c82df8e1d6fa78ddd1e9022a586ea1bd247a615e80a5cd9273d90111ddda7d9e974 languageName: node linkType: hard "http-cache-semantics@npm:^4.1.1": version: 4.1.1 resolution: "http-cache-semantics@npm:4.1.1" checksum: 83ac0bc60b17a3a36f9953e7be55e5c8f41acc61b22583060e8dedc9dd5e3607c823a88d0926f9150e571f90946835c7fe150732801010845c72cd8bbff1a236 languageName: node linkType: hard "http-proxy-agent@npm:^5.0.0": version: 5.0.0 resolution: "http-proxy-agent@npm:5.0.0" dependencies: "@tootallnate/once": 2 agent-base: 6 debug: 4 checksum: e2ee1ff1656a131953839b2a19cd1f3a52d97c25ba87bd2559af6ae87114abf60971e498021f9b73f9fd78aea8876d1fb0d4656aac8a03c6caa9fc175f22b786 languageName: node linkType: hard "https-proxy-agent@npm:^5.0.0": version: 5.0.1 resolution: "https-proxy-agent@npm:5.0.1" dependencies: agent-base: 6 debug: 4 checksum: 571fccdf38184f05943e12d37d6ce38197becdd69e58d03f43637f7fa1269cf303a7d228aa27e5b27bbd3af8f09fd938e1c91dcfefff2df7ba77c20ed8dfc765 languageName: node linkType: hard "human-signals@npm:^2.1.0": version: 2.1.0 resolution: "human-signals@npm:2.1.0" checksum: b87fd89fce72391625271454e70f67fe405277415b48bcc0117ca73d31fa23a4241787afdc8d67f5a116cf37258c052f59ea82daffa72364d61351423848e3b8 languageName: node linkType: hard "human-signals@npm:^4.3.0": version: 4.3.1 resolution: "human-signals@npm:4.3.1" checksum: 6f12958df3f21b6fdaf02d90896c271df00636a31e2bbea05bddf817a35c66b38a6fdac5863e2df85bd52f34958997f1f50350ff97249e1dff8452865d5235d1 languageName: node linkType: hard "humanize-ms@npm:^1.2.1": version: 1.2.1 resolution: "humanize-ms@npm:1.2.1" dependencies: ms: ^2.0.0 checksum: 9c7a74a2827f9294c009266c82031030eae811ca87b0da3dceb8d6071b9bde22c9f3daef0469c3c533cc67a97d8a167cd9fc0389350e5f415f61a79b171ded16 languageName: node linkType: hard "husky@npm:^8.0.3": version: 8.0.3 resolution: "husky@npm:8.0.3" bin: husky: lib/bin.js checksum: 837bc7e4413e58c1f2946d38fb050f5d7324c6f16b0fd66411ffce5703b294bd21429e8ba58711cd331951ee86ed529c5be4f76805959ff668a337dbfa82a1b0 languageName: node linkType: hard "iconv-lite@npm:^0.6.2": version: 0.6.3 resolution: "iconv-lite@npm:0.6.3" dependencies: safer-buffer: ">= 2.1.2 < 3.0.0" checksum: 3f60d47a5c8fc3313317edfd29a00a692cc87a19cac0159e2ce711d0ebc9019064108323b5e493625e25594f11c6236647d8e256fbe7a58f4a3b33b89e6d30bf languageName: node linkType: hard "ignore-by-default@npm:^2.1.0": version: 2.1.0 resolution: "ignore-by-default@npm:2.1.0" checksum: 2b2df4622b6a07a3e91893987be8f060dc553f7736b67e72aa2312041c450a6fa8371733d03c42f45a02e47ec824e961c2fba63a3d94fc59cbd669220a5b0d7a languageName: node linkType: hard "ignore@npm:^5.2.0, ignore@npm:^5.2.4": version: 5.2.4 resolution: "ignore@npm:5.2.4" checksum: 3d4c309c6006e2621659311783eaea7ebcd41fe4ca1d78c91c473157ad6666a57a2df790fe0d07a12300d9aac2888204d7be8d59f9aaf665b1c7fcdb432517ef languageName: node linkType: hard "import-fresh@npm:^3.2.1": version: 3.3.0 resolution: "import-fresh@npm:3.3.0" dependencies: parent-module: ^1.0.0 resolve-from: ^4.0.0 checksum: 2cacfad06e652b1edc50be650f7ec3be08c5e5a6f6d12d035c440a42a8cc028e60a5b99ca08a77ab4d6b1346da7d971915828f33cdab730d3d42f08242d09baa languageName: node linkType: hard "import-local@npm:^3.0.2": version: 3.1.0 resolution: "import-local@npm:3.1.0" 
dependencies: pkg-dir: ^4.2.0 resolve-cwd: ^3.0.0 bin: import-local-fixture: fixtures/cli.js checksum: bfcdb63b5e3c0e245e347f3107564035b128a414c4da1172a20dc67db2504e05ede4ac2eee1252359f78b0bfd7b19ef180aec427c2fce6493ae782d73a04cddd languageName: node linkType: hard "imurmurhash@npm:^0.1.4": version: 0.1.4 resolution: "imurmurhash@npm:0.1.4" checksum: 7cae75c8cd9a50f57dadd77482359f659eaebac0319dd9368bcd1714f55e65badd6929ca58569da2b6494ef13fdd5598cd700b1eba23f8b79c5f19d195a3ecf7 languageName: node linkType: hard "indent-string@npm:^4.0.0": version: 4.0.0 resolution: "indent-string@npm:4.0.0" checksum: 824cfb9929d031dabf059bebfe08cf3137365e112019086ed3dcff6a0a7b698cb80cf67ccccde0e25b9e2d7527aa6cc1fed1ac490c752162496caba3e6699612 languageName: node linkType: hard "indent-string@npm:^5.0.0": version: 5.0.0 resolution: "indent-string@npm:5.0.0" checksum: e466c27b6373440e6d84fbc19e750219ce25865cb82d578e41a6053d727e5520dc5725217d6eb1cc76005a1bb1696a0f106d84ce7ebda3033b963a38583fb3b3 languageName: node linkType: hard "inflight@npm:^1.0.4": version: 1.0.6 resolution: "inflight@npm:1.0.6" dependencies: once: ^1.3.0 wrappy: 1 checksum: f4f76aa072ce19fae87ce1ef7d221e709afb59d445e05d47fba710e85470923a75de35bfae47da6de1b18afc3ce83d70facf44cfb0aff89f0a3f45c0a0244dfd languageName: node linkType: hard "inherits@npm:2, inherits@npm:^2.0.3": version: 2.0.4 resolution: "inherits@npm:2.0.4" checksum: 4a48a733847879d6cf6691860a6b1e3f0f4754176e4d71494c41f3475553768b10f84b5ce1d40fbd0e34e6bfbb864ee35858ad4dd2cf31e02fc4a154b724d7f1 languageName: node linkType: hard "internal-slot@npm:^1.0.5": version: 1.0.5 resolution: "internal-slot@npm:1.0.5" dependencies: get-intrinsic: ^1.2.0 has: ^1.0.3 side-channel: ^1.0.4 checksum: 97e84046bf9e7574d0956bd98d7162313ce7057883b6db6c5c7b5e5f05688864b0978ba07610c726d15d66544ffe4b1050107d93f8a39ebc59b15d8b429b497a languageName: node linkType: hard "ip@npm:^2.0.0": version: 2.0.0 resolution: "ip@npm:2.0.0" checksum: cfcfac6b873b701996d71ec82a7dd27ba92450afdb421e356f44044ed688df04567344c36cbacea7d01b1c39a4c732dc012570ebe9bebfb06f27314bca625349 languageName: node linkType: hard "irregular-plurals@npm:^3.3.0": version: 3.5.0 resolution: "irregular-plurals@npm:3.5.0" checksum: 5b663091dc89155df7b2e9d053e8fb11941a0c4be95c4b6549ed3ea020489fdf4f75ea586c915b5b543704252679a5a6e8c6c3587da5ac3fc57b12da90a9aee7 languageName: node linkType: hard "is-array-buffer@npm:^3.0.1, is-array-buffer@npm:^3.0.2": version: 3.0.2 resolution: "is-array-buffer@npm:3.0.2" dependencies: call-bind: ^1.0.2 get-intrinsic: ^1.2.0 is-typed-array: ^1.1.10 checksum: dcac9dda66ff17df9cabdc58214172bf41082f956eab30bb0d86bc0fab1e44b690fc8e1f855cf2481245caf4e8a5a006a982a71ddccec84032ed41f9d8da8c14 languageName: node linkType: hard "is-arrayish@npm:^0.2.1": version: 0.2.1 resolution: "is-arrayish@npm:0.2.1" checksum: eef4417e3c10e60e2c810b6084942b3ead455af16c4509959a27e490e7aee87cfb3f38e01bbde92220b528a0ee1a18d52b787e1458ee86174d8c7f0e58cd488f languageName: node linkType: hard "is-bigint@npm:^1.0.1": version: 1.0.4 resolution: "is-bigint@npm:1.0.4" dependencies: has-bigints: ^1.0.1 checksum: c56edfe09b1154f8668e53ebe8252b6f185ee852a50f9b41e8d921cb2bed425652049fbe438723f6cb48a63ca1aa051e948e7e401e093477c99c84eba244f666 languageName: node linkType: hard "is-binary-path@npm:~2.1.0": version: 2.1.0 resolution: "is-binary-path@npm:2.1.0" dependencies: binary-extensions: ^2.0.0 checksum: 84192eb88cff70d320426f35ecd63c3d6d495da9d805b19bc65b518984b7c0760280e57dbf119b7e9be6b161784a5a673ab2c6abe83abb5198a432232ad5b35c languageName: 
node linkType: hard "is-boolean-object@npm:^1.1.0": version: 1.1.2 resolution: "is-boolean-object@npm:1.1.2" dependencies: call-bind: ^1.0.2 has-tostringtag: ^1.0.0 checksum: c03b23dbaacadc18940defb12c1c0e3aaece7553ef58b162a0f6bba0c2a7e1551b59f365b91e00d2dbac0522392d576ef322628cb1d036a0fe51eb466db67222 languageName: node linkType: hard "is-callable@npm:^1.1.3, is-callable@npm:^1.1.4, is-callable@npm:^1.2.7": version: 1.2.7 resolution: "is-callable@npm:1.2.7" checksum: 61fd57d03b0d984e2ed3720fb1c7a897827ea174bd44402878e059542ea8c4aeedee0ea0985998aa5cc2736b2fa6e271c08587addb5b3959ac52cf665173d1ac languageName: node linkType: hard "is-core-module@npm:^2.13.0": version: 2.13.0 resolution: "is-core-module@npm:2.13.0" dependencies: has: ^1.0.3 checksum: 053ab101fb390bfeb2333360fd131387bed54e476b26860dc7f5a700bbf34a0ec4454f7c8c4d43e8a0030957e4b3db6e16d35e1890ea6fb654c833095e040355 languageName: node linkType: hard "is-date-object@npm:^1.0.1": version: 1.0.5 resolution: "is-date-object@npm:1.0.5" dependencies: has-tostringtag: ^1.0.0 checksum: baa9077cdf15eb7b58c79398604ca57379b2fc4cf9aa7a9b9e295278648f628c9b201400c01c5e0f7afae56507d741185730307cbe7cad3b9f90a77e5ee342fc languageName: node linkType: hard "is-error@npm:^2.2.2": version: 2.2.2 resolution: "is-error@npm:2.2.2" checksum: a97b39587150f0d38f9f93f64699807fe3020fe5edbd63548f234dc2ba96fd7c776d66c062bf031dfeb93c7f48db563ff6bde588418ca041da37c659a416f055 languageName: node linkType: hard "is-extglob@npm:^2.1.1": version: 2.1.1 resolution: "is-extglob@npm:2.1.1" checksum: df033653d06d0eb567461e58a7a8c9f940bd8c22274b94bf7671ab36df5719791aae15eef6d83bbb5e23283967f2f984b8914559d4449efda578c775c4be6f85 languageName: node linkType: hard "is-fullwidth-code-point@npm:^3.0.0": version: 3.0.0 resolution: "is-fullwidth-code-point@npm:3.0.0" checksum: 44a30c29457c7fb8f00297bce733f0a64cd22eca270f83e58c105e0d015e45c019491a4ab2faef91ab51d4738c670daff901c799f6a700e27f7314029e99e348 languageName: node linkType: hard "is-fullwidth-code-point@npm:^4.0.0": version: 4.0.0 resolution: "is-fullwidth-code-point@npm:4.0.0" checksum: 8ae89bf5057bdf4f57b346fb6c55e9c3dd2549983d54191d722d5c739397a903012cc41a04ee3403fd872e811243ef91a7c5196da7b5841dc6b6aae31a264a8d languageName: node linkType: hard "is-generator-fn@npm:^2.0.0": version: 2.1.0 resolution: "is-generator-fn@npm:2.1.0" checksum: a6ad5492cf9d1746f73b6744e0c43c0020510b59d56ddcb78a91cbc173f09b5e6beff53d75c9c5a29feb618bfef2bf458e025ecf3a57ad2268e2fb2569f56215 languageName: node linkType: hard "is-glob@npm:^4.0.0, is-glob@npm:^4.0.1, is-glob@npm:^4.0.3, is-glob@npm:~4.0.1": version: 4.0.3 resolution: "is-glob@npm:4.0.3" dependencies: is-extglob: ^2.1.1 checksum: d381c1319fcb69d341cc6e6c7cd588e17cd94722d9a32dbd60660b993c4fb7d0f19438674e68dfec686d09b7c73139c9166b47597f846af387450224a8101ab4 languageName: node linkType: hard "is-lambda@npm:^1.0.1": version: 1.0.1 resolution: "is-lambda@npm:1.0.1" checksum: 93a32f01940220532e5948538699ad610d5924ac86093fcee83022252b363eb0cc99ba53ab084a04e4fb62bf7b5731f55496257a4c38adf87af9c4d352c71c35 languageName: node linkType: hard "is-negative-zero@npm:^2.0.2": version: 2.0.2 resolution: "is-negative-zero@npm:2.0.2" checksum: f3232194c47a549da60c3d509c9a09be442507616b69454716692e37ae9f37c4dea264fb208ad0c9f3efd15a796a46b79df07c7e53c6227c32170608b809149a languageName: node linkType: hard "is-number-object@npm:^1.0.4": version: 1.0.7 resolution: "is-number-object@npm:1.0.7" dependencies: has-tostringtag: ^1.0.0 checksum: 
d1e8d01bb0a7134c74649c4e62da0c6118a0bfc6771ea3c560914d52a627873e6920dd0fd0ebc0e12ad2ff4687eac4c308f7e80320b973b2c8a2c8f97a7524f7 languageName: node linkType: hard "is-number@npm:^7.0.0": version: 7.0.0 resolution: "is-number@npm:7.0.0" checksum: 456ac6f8e0f3111ed34668a624e45315201dff921e5ac181f8ec24923b99e9f32ca1a194912dc79d539c97d33dba17dc635202ff0b2cf98326f608323276d27a languageName: node linkType: hard "is-path-inside@npm:^3.0.3": version: 3.0.3 resolution: "is-path-inside@npm:3.0.3" checksum: abd50f06186a052b349c15e55b182326f1936c89a78bf6c8f2b707412517c097ce04bc49a0ca221787bc44e1049f51f09a2ffb63d22899051988d3a618ba13e9 languageName: node linkType: hard "is-plain-object@npm:^5.0.0": version: 5.0.0 resolution: "is-plain-object@npm:5.0.0" checksum: e32d27061eef62c0847d303125440a38660517e586f2f3db7c9d179ae5b6674ab0f469d519b2e25c147a1a3bc87156d0d5f4d8821e0ce4a9ee7fe1fcf11ce45c languageName: node linkType: hard "is-promise@npm:^4.0.0": version: 4.0.0 resolution: "is-promise@npm:4.0.0" checksum: 0b46517ad47b00b6358fd6553c83ec1f6ba9acd7ffb3d30a0bf519c5c69e7147c132430452351b8a9fc198f8dd6c4f76f8e6f5a7f100f8c77d57d9e0f4261a8a languageName: node linkType: hard "is-regex@npm:^1.1.4": version: 1.1.4 resolution: "is-regex@npm:1.1.4" dependencies: call-bind: ^1.0.2 has-tostringtag: ^1.0.0 checksum: 362399b33535bc8f386d96c45c9feb04cf7f8b41c182f54174c1a45c9abbbe5e31290bbad09a458583ff6bf3b2048672cdb1881b13289569a7c548370856a652 languageName: node linkType: hard "is-shared-array-buffer@npm:^1.0.2": version: 1.0.2 resolution: "is-shared-array-buffer@npm:1.0.2" dependencies: call-bind: ^1.0.2 checksum: 9508929cf14fdc1afc9d61d723c6e8d34f5e117f0bffda4d97e7a5d88c3a8681f633a74f8e3ad1fe92d5113f9b921dc5ca44356492079612f9a247efbce7032a languageName: node linkType: hard "is-stream@npm:^2.0.0": version: 2.0.1 resolution: "is-stream@npm:2.0.1" checksum: b8e05ccdf96ac330ea83c12450304d4a591f9958c11fd17bed240af8d5ffe08aedafa4c0f4cfccd4d28dc9d4d129daca1023633d5c11601a6cbc77521f6fae66 languageName: node linkType: hard "is-stream@npm:^3.0.0": version: 3.0.0 resolution: "is-stream@npm:3.0.0" checksum: 172093fe99119ffd07611ab6d1bcccfe8bc4aa80d864b15f43e63e54b7abc71e779acd69afdb854c4e2a67fdc16ae710e370eda40088d1cfc956a50ed82d8f16 languageName: node linkType: hard "is-string@npm:^1.0.5, is-string@npm:^1.0.7": version: 1.0.7 resolution: "is-string@npm:1.0.7" dependencies: has-tostringtag: ^1.0.0 checksum: 323b3d04622f78d45077cf89aab783b2f49d24dc641aa89b5ad1a72114cfeff2585efc8c12ef42466dff32bde93d839ad321b26884cf75e5a7892a938b089989 languageName: node linkType: hard "is-symbol@npm:^1.0.2, is-symbol@npm:^1.0.3": version: 1.0.4 resolution: "is-symbol@npm:1.0.4" dependencies: has-symbols: ^1.0.2 checksum: 92805812ef590738d9de49d677cd17dfd486794773fb6fa0032d16452af46e9b91bb43ffe82c983570f015b37136f4b53b28b8523bfb10b0ece7a66c31a54510 languageName: node linkType: hard "is-typed-array@npm:^1.1.10, is-typed-array@npm:^1.1.9": version: 1.1.12 resolution: "is-typed-array@npm:1.1.12" dependencies: which-typed-array: ^1.1.11 checksum: 4c89c4a3be07186caddadf92197b17fda663a9d259ea0d44a85f171558270d36059d1c386d34a12cba22dfade5aba497ce22778e866adc9406098c8fc4771796 languageName: node linkType: hard "is-unicode-supported@npm:^1.2.0": version: 1.3.0 resolution: "is-unicode-supported@npm:1.3.0" checksum: 20a1fc161afafaf49243551a5ac33b6c4cf0bbcce369fcd8f2951fbdd000c30698ce320de3ee6830497310a8f41880f8066d440aa3eb0a853e2aa4836dd89abc languageName: node linkType: hard "is-weakref@npm:^1.0.2": version: 1.0.2 resolution: "is-weakref@npm:1.0.2" 
dependencies: call-bind: ^1.0.2 checksum: 95bd9a57cdcb58c63b1c401c60a474b0f45b94719c30f548c891860f051bc2231575c290a6b420c6bc6e7ed99459d424c652bd5bf9a1d5259505dc35b4bf83de languageName: node linkType: hard "isarray@npm:^2.0.5": version: 2.0.5 resolution: "isarray@npm:2.0.5" checksum: bd5bbe4104438c4196ba58a54650116007fa0262eccef13a4c55b2e09a5b36b59f1e75b9fcc49883dd9d4953892e6fc007eef9e9155648ceea036e184b0f930a languageName: node linkType: hard "isexe@npm:^2.0.0": version: 2.0.0 resolution: "isexe@npm:2.0.0" checksum: 26bf6c5480dda5161c820c5b5c751ae1e766c587b1f951ea3fcfc973bafb7831ae5b54a31a69bd670220e42e99ec154475025a468eae58ea262f813fdc8d1c62 languageName: node linkType: hard "istanbul-lib-coverage@npm:^3.0.0, istanbul-lib-coverage@npm:^3.2.0": version: 3.2.0 resolution: "istanbul-lib-coverage@npm:3.2.0" checksum: a2a545033b9d56da04a8571ed05c8120bf10e9bce01cf8633a3a2b0d1d83dff4ac4fe78d6d5673c27fc29b7f21a41d75f83a36be09f82a61c367b56aa73c1ff9 languageName: node linkType: hard "istanbul-lib-instrument@npm:^5.0.4": version: 5.2.1 resolution: "istanbul-lib-instrument@npm:5.2.1" dependencies: "@babel/core": ^7.12.3 "@babel/parser": ^7.14.7 "@istanbuljs/schema": ^0.1.2 istanbul-lib-coverage: ^3.2.0 semver: ^6.3.0 checksum: bf16f1803ba5e51b28bbd49ed955a736488381e09375d830e42ddeb403855b2006f850711d95ad726f2ba3f1ae8e7366de7e51d2b9ac67dc4d80191ef7ddf272 languageName: node linkType: hard "istanbul-lib-instrument@npm:^6.0.0": version: 6.0.0 resolution: "istanbul-lib-instrument@npm:6.0.0" dependencies: "@babel/core": ^7.12.3 "@babel/parser": ^7.14.7 "@istanbuljs/schema": ^0.1.2 istanbul-lib-coverage: ^3.2.0 semver: ^7.5.4 checksum: b9dc3723a769e65dbe1b912f935088ffc07cf393fa78a3ce79022c91aabb0ad01405ffd56083cdd822e514798e9daae3ea7bfe85633b094ecb335d28eb0a3f97 languageName: node linkType: hard "istanbul-lib-report@npm:^3.0.0": version: 3.0.1 resolution: "istanbul-lib-report@npm:3.0.1" dependencies: istanbul-lib-coverage: ^3.0.0 make-dir: ^4.0.0 supports-color: ^7.1.0 checksum: fd17a1b879e7faf9bb1dc8f80b2a16e9f5b7b8498fe6ed580a618c34df0bfe53d2abd35bf8a0a00e628fb7405462576427c7df20bbe4148d19c14b431c974b21 languageName: node linkType: hard "istanbul-lib-source-maps@npm:^4.0.0": version: 4.0.1 resolution: "istanbul-lib-source-maps@npm:4.0.1" dependencies: debug: ^4.1.1 istanbul-lib-coverage: ^3.0.0 source-map: ^0.6.1 checksum: 21ad3df45db4b81852b662b8d4161f6446cd250c1ddc70ef96a585e2e85c26ed7cd9c2a396a71533cfb981d1a645508bc9618cae431e55d01a0628e7dec62ef2 languageName: node linkType: hard "istanbul-reports@npm:^3.1.3": version: 3.1.6 resolution: "istanbul-reports@npm:3.1.6" dependencies: html-escaper: ^2.0.0 istanbul-lib-report: ^3.0.0 checksum: 44c4c0582f287f02341e9720997f9e82c071627e1e862895745d5f52ec72c9b9f38e1d12370015d2a71dcead794f34c7732aaef3fab80a24bc617a21c3d911d6 languageName: node linkType: hard "jackspeak@npm:^2.0.3": version: 2.3.0 resolution: "jackspeak@npm:2.3.0" dependencies: "@isaacs/cliui": ^8.0.2 "@pkgjs/parseargs": ^0.11.0 dependenciesMeta: "@pkgjs/parseargs": optional: true checksum: 71bf716f4b5793226d4aeb9761ebf2605ee093b59f91a61451d57d998dd64bbf2b54323fb749b8b2ae8b6d8a463de4f6e3fedab50108671f247bbc80195a6306 languageName: node linkType: hard "jest-changed-files@npm:^29.6.3": version: 29.6.3 resolution: "jest-changed-files@npm:29.6.3" dependencies: execa: ^5.0.0 jest-util: ^29.6.3 p-limit: ^3.1.0 checksum: 55bc820a70c220a02fec214d5c48d5e0d829549e5c7b9959776b4ca3f76f5ff20c7c8ff816a847822766f1d712477ab3027f7a66ec61bf65de3f852e878b4dfd languageName: node linkType: hard 
"jest-circus@npm:^29.6.4": version: 29.6.4 resolution: "jest-circus@npm:29.6.4" dependencies: "@jest/environment": ^29.6.4 "@jest/expect": ^29.6.4 "@jest/test-result": ^29.6.4 "@jest/types": ^29.6.3 "@types/node": "*" chalk: ^4.0.0 co: ^4.6.0 dedent: ^1.0.0 is-generator-fn: ^2.0.0 jest-each: ^29.6.3 jest-matcher-utils: ^29.6.4 jest-message-util: ^29.6.3 jest-runtime: ^29.6.4 jest-snapshot: ^29.6.4 jest-util: ^29.6.3 p-limit: ^3.1.0 pretty-format: ^29.6.3 pure-rand: ^6.0.0 slash: ^3.0.0 stack-utils: ^2.0.3 checksum: 31f64ddf6df4aefe30ef5f8de9da137c9cba58ab5e2a25cf749450735088dc88a9974591a4256d481af0fe64608173c921219f9fad9a7dd87cbe47a79e111be8 languageName: node linkType: hard "jest-cli@npm:^29.6.4": version: 29.6.4 resolution: "jest-cli@npm:29.6.4" dependencies: "@jest/core": ^29.6.4 "@jest/test-result": ^29.6.4 "@jest/types": ^29.6.3 chalk: ^4.0.0 exit: ^0.1.2 graceful-fs: ^4.2.9 import-local: ^3.0.2 jest-config: ^29.6.4 jest-util: ^29.6.3 jest-validate: ^29.6.3 prompts: ^2.0.1 yargs: ^17.3.1 peerDependencies: node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 peerDependenciesMeta: node-notifier: optional: true bin: jest: bin/jest.js checksum: 87a85a27eff0e502717b6ee0ce861d3e50d8c47d7298477f8ca10964b958f06c20241d28f1360ce2a85072763483e4924248106a8ed530ca460a56db3fdfc53e languageName: node linkType: hard "jest-config@npm:^29.6.4": version: 29.6.4 resolution: "jest-config@npm:29.6.4" dependencies: "@babel/core": ^7.11.6 "@jest/test-sequencer": ^29.6.4 "@jest/types": ^29.6.3 babel-jest: ^29.6.4 chalk: ^4.0.0 ci-info: ^3.2.0 deepmerge: ^4.2.2 glob: ^7.1.3 graceful-fs: ^4.2.9 jest-circus: ^29.6.4 jest-environment-node: ^29.6.4 jest-get-type: ^29.6.3 jest-regex-util: ^29.6.3 jest-resolve: ^29.6.4 jest-runner: ^29.6.4 jest-util: ^29.6.3 jest-validate: ^29.6.3 micromatch: ^4.0.4 parse-json: ^5.2.0 pretty-format: ^29.6.3 slash: ^3.0.0 strip-json-comments: ^3.1.1 peerDependencies: "@types/node": "*" ts-node: ">=9.0.0" peerDependenciesMeta: "@types/node": optional: true ts-node: optional: true checksum: 177352658774344896df3988dbe892e0b117579f45cc43aebc588493665bf19a557e202f097f5b4a987314ec2d84afa0769299ac6e702c5923d1fd3cfa4692b0 languageName: node linkType: hard "jest-diff@npm:^29.6.4": version: 29.6.4 resolution: "jest-diff@npm:29.6.4" dependencies: chalk: ^4.0.0 diff-sequences: ^29.6.3 jest-get-type: ^29.6.3 pretty-format: ^29.6.3 checksum: e205c45ab6dbcc660dc2a682cddb20f6a3cbbbdecd2821cce2050619f96dbd7560ee25f7f51d42c302596aeaddbea54390b78be3ab639340d24d67e4d270a8b0 languageName: node linkType: hard "jest-docblock@npm:^29.6.3": version: 29.6.3 resolution: "jest-docblock@npm:29.6.3" dependencies: detect-newline: ^3.0.0 checksum: 6f3213a1e79e7eedafeb462acfa9a41303f9c0167893b140f6818fa16d7eb6bf3f9b9cf4669097ca6b7154847793489ecd6b4f6cfb0e416b88cfa3b4b36715b6 languageName: node linkType: hard "jest-each@npm:^29.6.3": version: 29.6.3 resolution: "jest-each@npm:29.6.3" dependencies: "@jest/types": ^29.6.3 chalk: ^4.0.0 jest-get-type: ^29.6.3 jest-util: ^29.6.3 pretty-format: ^29.6.3 checksum: fe06e80b3554e2a8464f5f5c61943e02db1f8a7177139cb55b3201a1d1513cb089d8800401f102729a31bf8dd6f88229044e6088fea9dd5647ed11e841b6b88c languageName: node linkType: hard "jest-environment-node@npm:^29.6.4": version: 29.6.4 resolution: "jest-environment-node@npm:29.6.4" dependencies: "@jest/environment": ^29.6.4 "@jest/fake-timers": ^29.6.4 "@jest/types": ^29.6.3 "@types/node": "*" jest-mock: ^29.6.3 jest-util: ^29.6.3 checksum: 
518221505af4bd32c84f2af2c03f9d771de2711bd69fe7723b648fcc2e05d95b4e75f493afa9010209e26a4a3309ebee971f9b18c45b540891771d3b68c3a16e languageName: node linkType: hard "jest-get-type@npm:^29.6.3": version: 29.6.3 resolution: "jest-get-type@npm:29.6.3" checksum: 88ac9102d4679d768accae29f1e75f592b760b44277df288ad76ce5bf038c3f5ce3719dea8aa0f035dac30e9eb034b848ce716b9183ad7cc222d029f03e92205 languageName: node linkType: hard "jest-haste-map@npm:^29.6.4": version: 29.6.4 resolution: "jest-haste-map@npm:29.6.4" dependencies: "@jest/types": ^29.6.3 "@types/graceful-fs": ^4.1.3 "@types/node": "*" anymatch: ^3.0.3 fb-watchman: ^2.0.0 fsevents: ^2.3.2 graceful-fs: ^4.2.9 jest-regex-util: ^29.6.3 jest-util: ^29.6.3 jest-worker: ^29.6.4 micromatch: ^4.0.4 walker: ^1.0.8 dependenciesMeta: fsevents: optional: true checksum: 4f720fd3813bb38400b7a9a094e55664cbddd907ba1769457ed746f6c870c615167647a5b697a788183d832b1dcb1b66143e52990a6f4403283f6686077fa868 languageName: node linkType: hard "jest-leak-detector@npm:^29.6.3": version: 29.6.3 resolution: "jest-leak-detector@npm:29.6.3" dependencies: jest-get-type: ^29.6.3 pretty-format: ^29.6.3 checksum: 27548fcfc7602fe1b88f8600185e35ffff71751f3631e52bbfdfc72776f5a13a430185cf02fc632b41320a74f99ae90e40ce101c8887509f0f919608a7175129 languageName: node linkType: hard "jest-matcher-utils@npm:^29.6.4": version: 29.6.4 resolution: "jest-matcher-utils@npm:29.6.4" dependencies: chalk: ^4.0.0 jest-diff: ^29.6.4 jest-get-type: ^29.6.3 pretty-format: ^29.6.3 checksum: 9e17bce282e74bdbba2ce5475c490e0bba4f464cd42132bfc5df0337e0853af4dba925c7f4f61cbb0a4818fa121d28d7ff0196ec8829773a22fce59a822976d2 languageName: node linkType: hard "jest-message-util@npm:^29.6.3": version: 29.6.3 resolution: "jest-message-util@npm:29.6.3" dependencies: "@babel/code-frame": ^7.12.13 "@jest/types": ^29.6.3 "@types/stack-utils": ^2.0.0 chalk: ^4.0.0 graceful-fs: ^4.2.9 micromatch: ^4.0.4 pretty-format: ^29.6.3 slash: ^3.0.0 stack-utils: ^2.0.3 checksum: 59f5229a06c073a8877ba4d2e304cc07d63b0062bf5764d4bed14364403889e77f1825d1bd9017c19a840847d17dffd414dc06f1fcb537b5f9e03dbc65b84ada languageName: node linkType: hard "jest-mock@npm:^29.6.3": version: 29.6.3 resolution: "jest-mock@npm:29.6.3" dependencies: "@jest/types": ^29.6.3 "@types/node": "*" jest-util: ^29.6.3 checksum: 35772968010c0afb1bb1ef78570b9cbea907c6f967d24b4e95e1a596a1000c63d60e225fb9ddfdd5218674da4aa61d92a09927fc26310cecbbfaa8278d919e32 languageName: node linkType: hard "jest-pnp-resolver@npm:^1.2.2": version: 1.2.3 resolution: "jest-pnp-resolver@npm:1.2.3" peerDependencies: jest-resolve: "*" peerDependenciesMeta: jest-resolve: optional: true checksum: db1a8ab2cb97ca19c01b1cfa9a9c8c69a143fde833c14df1fab0766f411b1148ff0df878adea09007ac6a2085ec116ba9a996a6ad104b1e58c20adbf88eed9b2 languageName: node linkType: hard "jest-regex-util@npm:^29.6.3": version: 29.6.3 resolution: "jest-regex-util@npm:29.6.3" checksum: 0518beeb9bf1228261695e54f0feaad3606df26a19764bc19541e0fc6e2a3737191904607fb72f3f2ce85d9c16b28df79b7b1ec9443aa08c3ef0e9efda6f8f2a languageName: node linkType: hard "jest-resolve-dependencies@npm:^29.6.4": version: 29.6.4 resolution: "jest-resolve-dependencies@npm:29.6.4" dependencies: jest-regex-util: ^29.6.3 jest-snapshot: ^29.6.4 checksum: 34f81d22cbd72203130cc14cbb66d5783d9f59fba4d366b9653f8fb4f6feeaac25d89696f2f77c700659843d5440dc92f58ad443ba05da1da46c39234866d916 languageName: node linkType: hard "jest-resolve@npm:^29.6.4": version: 29.6.4 resolution: "jest-resolve@npm:29.6.4" dependencies: chalk: ^4.0.0 graceful-fs: ^4.2.9 
jest-haste-map: ^29.6.4 jest-pnp-resolver: ^1.2.2 jest-util: ^29.6.3 jest-validate: ^29.6.3 resolve: ^1.20.0 resolve.exports: ^2.0.0 slash: ^3.0.0 checksum: 5f0ef260aec79ef00e16e0ba7b27d527054e1faed08a144279cd191b5c5b71af67c52b9ddfd24aa2f563d254618ce9bf7519809f23fb2abf6c4fa375503caa28 languageName: node linkType: hard "jest-runner@npm:^29.6.4": version: 29.6.4 resolution: "jest-runner@npm:29.6.4" dependencies: "@jest/console": ^29.6.4 "@jest/environment": ^29.6.4 "@jest/test-result": ^29.6.4 "@jest/transform": ^29.6.4 "@jest/types": ^29.6.3 "@types/node": "*" chalk: ^4.0.0 emittery: ^0.13.1 graceful-fs: ^4.2.9 jest-docblock: ^29.6.3 jest-environment-node: ^29.6.4 jest-haste-map: ^29.6.4 jest-leak-detector: ^29.6.3 jest-message-util: ^29.6.3 jest-resolve: ^29.6.4 jest-runtime: ^29.6.4 jest-util: ^29.6.3 jest-watcher: ^29.6.4 jest-worker: ^29.6.4 p-limit: ^3.1.0 source-map-support: 0.5.13 checksum: ca977dd30262171fe000de8407a3187c16e7057ddf690bcc21068155aacd4824ee927b544e0fa9f2885948b47a5123b472da41e095e3bcbdebb79f1fa2f2fc56 languageName: node linkType: hard "jest-runtime@npm:^29.6.4": version: 29.6.4 resolution: "jest-runtime@npm:29.6.4" dependencies: "@jest/environment": ^29.6.4 "@jest/fake-timers": ^29.6.4 "@jest/globals": ^29.6.4 "@jest/source-map": ^29.6.3 "@jest/test-result": ^29.6.4 "@jest/transform": ^29.6.4 "@jest/types": ^29.6.3 "@types/node": "*" chalk: ^4.0.0 cjs-module-lexer: ^1.0.0 collect-v8-coverage: ^1.0.0 glob: ^7.1.3 graceful-fs: ^4.2.9 jest-haste-map: ^29.6.4 jest-message-util: ^29.6.3 jest-mock: ^29.6.3 jest-regex-util: ^29.6.3 jest-resolve: ^29.6.4 jest-snapshot: ^29.6.4 jest-util: ^29.6.3 slash: ^3.0.0 strip-bom: ^4.0.0 checksum: 93deacd06f8f2bb808dbfb8acbcbc0b724187b3d3fffafd497a32c939bf385ca21f5a3f03eebd5b958a0e93865d0e68a0db73bd0fe16dafbd5e922558aa7b359 languageName: node linkType: hard "jest-snapshot@npm:^29.6.4": version: 29.6.4 resolution: "jest-snapshot@npm:29.6.4" dependencies: "@babel/core": ^7.11.6 "@babel/generator": ^7.7.2 "@babel/plugin-syntax-jsx": ^7.7.2 "@babel/plugin-syntax-typescript": ^7.7.2 "@babel/types": ^7.3.3 "@jest/expect-utils": ^29.6.4 "@jest/transform": ^29.6.4 "@jest/types": ^29.6.3 babel-preset-current-node-syntax: ^1.0.0 chalk: ^4.0.0 expect: ^29.6.4 graceful-fs: ^4.2.9 jest-diff: ^29.6.4 jest-get-type: ^29.6.3 jest-matcher-utils: ^29.6.4 jest-message-util: ^29.6.3 jest-util: ^29.6.3 natural-compare: ^1.4.0 pretty-format: ^29.6.3 semver: ^7.5.3 checksum: 0c9b5ec640457fb780ac6c9b6caa814436e9e16bf744772eee3bfd055ae5f7a3085a6a09b2f30910e31915dafc3955d92357cc98189e4d5dcb417b5fdafda6e3 languageName: node linkType: hard "jest-util@npm:^29.0.0, jest-util@npm:^29.6.3": version: 29.6.3 resolution: "jest-util@npm:29.6.3" dependencies: "@jest/types": ^29.6.3 "@types/node": "*" chalk: ^4.0.0 ci-info: ^3.2.0 graceful-fs: ^4.2.9 picomatch: ^2.2.3 checksum: 7bf3ba3ac67ac6ceff7d8fdd23a86768e23ddd9133ecd9140ef87cc0c28708effabaf67a6cd45cd9d90a63d645a522ed0825d09ee59ac4c03b9c473b1fef4c7c languageName: node linkType: hard "jest-validate@npm:^29.6.3": version: 29.6.3 resolution: "jest-validate@npm:29.6.3" dependencies: "@jest/types": ^29.6.3 camelcase: ^6.2.0 chalk: ^4.0.0 jest-get-type: ^29.6.3 leven: ^3.1.0 pretty-format: ^29.6.3 checksum: caa489ed11080441c636b8035ab71bafbdc0c052b1e452855e4d2dd24ac15e497710a270ea6fc5ef8926b22c1ce4d6e07ec2dc193f0810cff5851d7a2222c045 languageName: node linkType: hard "jest-watcher@npm:^29.6.4": version: 29.6.4 resolution: "jest-watcher@npm:29.6.4" dependencies: "@jest/test-result": ^29.6.4 "@jest/types": ^29.6.3 
"@types/node": "*" ansi-escapes: ^4.2.1 chalk: ^4.0.0 emittery: ^0.13.1 jest-util: ^29.6.3 string-length: ^4.0.1 checksum: 13c0f96f7e9212e4f3ef2daf3e787045bdcec414061bf286eca934c7f4083fb04d38df9ced9c0edfbe15f3521ca581eb2ed6108c338a0db1f3e1def65687992f languageName: node linkType: hard "jest-worker@npm:^29.6.4": version: 29.6.4 resolution: "jest-worker@npm:29.6.4" dependencies: "@types/node": "*" jest-util: ^29.6.3 merge-stream: ^2.0.0 supports-color: ^8.0.0 checksum: 05d19a5759ebfeb964036065be55ad8d8e8ddffa85d9b3a4c0b95765695efb1d8226ec824a4d8e660c38cda3389bfeb98d819f47232acf9fb0e79f553b7c0a76 languageName: node linkType: hard "jest@npm:^29.5.0": version: 29.6.4 resolution: "jest@npm:29.6.4" dependencies: "@jest/core": ^29.6.4 "@jest/types": ^29.6.3 import-local: ^3.0.2 jest-cli: ^29.6.4 peerDependencies: node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 peerDependenciesMeta: node-notifier: optional: true bin: jest: bin/jest.js checksum: ba28ca7a86d029bcd742bb254c0c8d0119c1e002ddae128ff6409ebabc0b29c36f69dbf3fdd326aff16e7b2500c9a918bbc6a9a5db4d966e035127242239439f languageName: node linkType: hard "js-string-escape@npm:^1.0.1": version: 1.0.1 resolution: "js-string-escape@npm:1.0.1" checksum: f11e0991bf57e0c183b55c547acec85bd2445f043efc9ea5aa68b41bd2a3e7d3ce94636cb233ae0d84064ba4c1a505d32e969813c5b13f81e7d4be12c59256fe languageName: node linkType: hard "js-tokens@npm:^4.0.0": version: 4.0.0 resolution: "js-tokens@npm:4.0.0" checksum: 8a95213a5a77deb6cbe94d86340e8d9ace2b93bc367790b260101d2f36a2eaf4e4e22d9fa9cf459b38af3a32fb4190e638024cf82ec95ef708680e405ea7cc78 languageName: node linkType: hard "js-yaml@npm:^3.13.1, js-yaml@npm:^3.14.1": version: 3.14.1 resolution: "js-yaml@npm:3.14.1" dependencies: argparse: ^1.0.7 esprima: ^4.0.0 bin: js-yaml: bin/js-yaml.js checksum: bef146085f472d44dee30ec34e5cf36bf89164f5d585435a3d3da89e52622dff0b188a580e4ad091c3341889e14cb88cac6e4deb16dc5b1e9623bb0601fc255c languageName: node linkType: hard "js-yaml@npm:^4.1.0": version: 4.1.0 resolution: "js-yaml@npm:4.1.0" dependencies: argparse: ^2.0.1 bin: js-yaml: bin/js-yaml.js checksum: c7830dfd456c3ef2c6e355cc5a92e6700ceafa1d14bba54497b34a99f0376cecbb3e9ac14d3e5849b426d5a5140709a66237a8c991c675431271c4ce5504151a languageName: node linkType: hard "jsesc@npm:^2.5.1": version: 2.5.2 resolution: "jsesc@npm:2.5.2" bin: jsesc: bin/jsesc checksum: 4dc190771129e12023f729ce20e1e0bfceac84d73a85bc3119f7f938843fe25a4aeccb54b6494dce26fcf263d815f5f31acdefac7cc9329efb8422a4f4d9fa9d languageName: node linkType: hard "json-parse-better-errors@npm:^1.0.1": version: 1.0.2 resolution: "json-parse-better-errors@npm:1.0.2" checksum: ff2b5ba2a70e88fd97a3cb28c1840144c5ce8fae9cbeeddba15afa333a5c407cf0e42300cd0a2885dbb055227fe68d405070faad941beeffbfde9cf3b2c78c5d languageName: node linkType: hard "json-parse-even-better-errors@npm:^2.3.0": version: 2.3.1 resolution: "json-parse-even-better-errors@npm:2.3.1" checksum: 798ed4cf3354a2d9ccd78e86d2169515a0097a5c133337807cdf7f1fc32e1391d207ccfc276518cc1d7d8d4db93288b8a50ba4293d212ad1336e52a8ec0a941f languageName: node linkType: hard "json-schema-traverse@npm:^0.4.1": version: 0.4.1 resolution: "json-schema-traverse@npm:0.4.1" checksum: 7486074d3ba247769fda17d5181b345c9fb7d12e0da98b22d1d71a5db9698d8b4bd900a3ec1a4ffdd60846fc2556274a5c894d0c48795f14cb03aeae7b55260b languageName: node linkType: hard "json-stable-stringify-without-jsonify@npm:^1.0.1": version: 1.0.1 resolution: "json-stable-stringify-without-jsonify@npm:1.0.1" checksum: 
cff44156ddce9c67c44386ad5cddf91925fe06b1d217f2da9c4910d01f358c6e3989c4d5a02683c7a5667f9727ff05831f7aa8ae66c8ff691c556f0884d49215 languageName: node linkType: hard "json2csv@npm:^5.0.6": version: 5.0.7 resolution: "json2csv@npm:5.0.7" dependencies: commander: ^6.1.0 jsonparse: ^1.3.1 lodash.get: ^4.4.2 bin: json2csv: bin/json2csv.js checksum: 81b511e4f5abba1dcda90593c193d15e5f05f1def91377b6289536e31fdb629889da6a2b4612b9ff699116a29b1758d20c0d71f7921fcfb09863da5b2d883139 languageName: node linkType: hard "json5@npm:^1.0.2": version: 1.0.2 resolution: "json5@npm:1.0.2" dependencies: minimist: ^1.2.0 bin: json5: lib/cli.js checksum: 866458a8c58a95a49bef3adba929c625e82532bcff1fe93f01d29cb02cac7c3fe1f4b79951b7792c2da9de0b32871a8401a6e3c5b36778ad852bf5b8a61165d7 languageName: node linkType: hard "json5@npm:^2.2.3": version: 2.2.3 resolution: "json5@npm:2.2.3" bin: json5: lib/cli.js checksum: 2a7436a93393830bce797d4626275152e37e877b265e94ca69c99e3d20c2b9dab021279146a39cdb700e71b2dd32a4cebd1514cd57cee102b1af906ce5040349 languageName: node linkType: hard "jsonfile@npm:^6.0.1": version: 6.1.0 resolution: "jsonfile@npm:6.1.0" dependencies: graceful-fs: ^4.1.6 universalify: ^2.0.0 dependenciesMeta: graceful-fs: optional: true checksum: 7af3b8e1ac8fe7f1eccc6263c6ca14e1966fcbc74b618d3c78a0a2075579487547b94f72b7a1114e844a1e15bb00d440e5d1720bfc4612d790a6f285d5ea8354 languageName: node linkType: hard "jsonparse@npm:^1.3.1": version: 1.3.1 resolution: "jsonparse@npm:1.3.1" checksum: 6514a7be4674ebf407afca0eda3ba284b69b07f9958a8d3113ef1005f7ec610860c312be067e450c569aab8b89635e332cee3696789c750692bb60daba627f4d languageName: node linkType: hard "kleur@npm:^3.0.3": version: 3.0.3 resolution: "kleur@npm:3.0.3" checksum: df82cd1e172f957bae9c536286265a5cdbd5eeca487cb0a3b2a7b41ef959fc61f8e7c0e9aeea9c114ccf2c166b6a8dd45a46fd619c1c569d210ecd2765ad5169 languageName: node linkType: hard "kleur@npm:^4.1.4": version: 4.1.5 resolution: "kleur@npm:4.1.5" checksum: 1dc476e32741acf0b1b5b0627ffd0d722e342c1b0da14de3e8ae97821327ca08f9fb944542fb3c126d90ac5f27f9d804edbe7c585bf7d12ef495d115e0f22c12 languageName: node linkType: hard "leven@npm:^3.1.0": version: 3.1.0 resolution: "leven@npm:3.1.0" checksum: 638401d534585261b6003db9d99afd244dfe82d75ddb6db5c0df412842d5ab30b2ef18de471aaec70fe69a46f17b4ae3c7f01d8a4e6580ef7adb9f4273ad1e55 languageName: node linkType: hard "levn@npm:^0.4.1": version: 0.4.1 resolution: "levn@npm:0.4.1" dependencies: prelude-ls: ^1.2.1 type-check: ~0.4.0 checksum: 12c5021c859bd0f5248561bf139121f0358285ec545ebf48bb3d346820d5c61a4309535c7f387ed7d84361cf821e124ce346c6b7cef8ee09a67c1473b46d0fc4 languageName: node linkType: hard "lilconfig@npm:2.1.0": version: 2.1.0 resolution: "lilconfig@npm:2.1.0" checksum: 8549bb352b8192375fed4a74694cd61ad293904eee33f9d4866c2192865c44c4eb35d10782966242634e0cbc1e91fe62b1247f148dc5514918e3a966da7ea117 languageName: node linkType: hard "lines-and-columns@npm:^1.1.6": version: 1.2.4 resolution: "lines-and-columns@npm:1.2.4" checksum: 0c37f9f7fa212b38912b7145e1cd16a5f3cd34d782441c3e6ca653485d326f58b3caccda66efce1c5812bde4961bbde3374fae4b0d11bf1226152337f3894aa5 languageName: node linkType: hard "lint-staged@npm:^13.1.0": version: 13.3.0 resolution: "lint-staged@npm:13.3.0" dependencies: chalk: 5.3.0 commander: 11.0.0 debug: 4.3.4 execa: 7.2.0 lilconfig: 2.1.0 listr2: 6.6.1 micromatch: 4.0.5 pidtree: 0.6.0 string-argv: 0.3.2 yaml: 2.3.1 bin: lint-staged: bin/lint-staged.js checksum: 
f7c146cc2849c9ce4f1d2808d990fcbdef5e0bb79e6e79cc895f53c91f5ac4dcefdb8c3465156b38a015dcb051f2795c6bda4f20a1e2f2fa654c7ba521b2d2e0 languageName: node linkType: hard "listr2@npm:6.6.1": version: 6.6.1 resolution: "listr2@npm:6.6.1" dependencies: cli-truncate: ^3.1.0 colorette: ^2.0.20 eventemitter3: ^5.0.1 log-update: ^5.0.1 rfdc: ^1.3.0 wrap-ansi: ^8.1.0 peerDependencies: enquirer: ">= 2.3.0 < 3" peerDependenciesMeta: enquirer: optional: true checksum: 99600e8a51f838f7208bce7e16d6b3d91d361f13881e6aa91d0b561a9a093ddcf63b7bc2a7b47aec7fdbff9d0e8c9f68cb66e6dfe2d857e5b1df8ab045c26ce8 languageName: node linkType: hard "load-json-file@npm:^4.0.0": version: 4.0.0 resolution: "load-json-file@npm:4.0.0" dependencies: graceful-fs: ^4.1.2 parse-json: ^4.0.0 pify: ^3.0.0 strip-bom: ^3.0.0 checksum: 8f5d6d93ba64a9620445ee9bde4d98b1eac32cf6c8c2d20d44abfa41a6945e7969456ab5f1ca2fb06ee32e206c9769a20eec7002fe290de462e8c884b6b8b356 languageName: node linkType: hard "load-json-file@npm:^7.0.0": version: 7.0.1 resolution: "load-json-file@npm:7.0.1" checksum: a560288da6891778321ef993e4bdbdf05374a4f3a3aeedd5ba6b64672798c830d748cfc59a2ec9891a3db30e78b3d04172e0dcb0d4828168289a393147ca0e74 languageName: node linkType: hard "locate-path@npm:^5.0.0": version: 5.0.0 resolution: "locate-path@npm:5.0.0" dependencies: p-locate: ^4.1.0 checksum: 83e51725e67517287d73e1ded92b28602e3ae5580b301fe54bfb76c0c723e3f285b19252e375712316774cf52006cb236aed5704692c32db0d5d089b69696e30 languageName: node linkType: hard "locate-path@npm:^6.0.0": version: 6.0.0 resolution: "locate-path@npm:6.0.0" dependencies: p-locate: ^5.0.0 checksum: 72eb661788a0368c099a184c59d2fee760b3831c9c1c33955e8a19ae4a21b4116e53fa736dc086cdeb9fce9f7cc508f2f92d2d3aae516f133e16a2bb59a39f5a languageName: node linkType: hard "locate-path@npm:^7.1.0": version: 7.2.0 resolution: "locate-path@npm:7.2.0" dependencies: p-locate: ^6.0.0 checksum: c1b653bdf29beaecb3d307dfb7c44d98a2a98a02ebe353c9ad055d1ac45d6ed4e1142563d222df9b9efebc2bcb7d4c792b507fad9e7150a04c29530b7db570f8 languageName: node linkType: hard "lodash.get@npm:^4.4.2": version: 4.4.2 resolution: "lodash.get@npm:4.4.2" checksum: e403047ddb03181c9d0e92df9556570e2b67e0f0a930fcbbbd779370972368f5568e914f913e93f3b08f6d492abc71e14d4e9b7a18916c31fa04bd2306efe545 languageName: node linkType: hard "lodash.memoize@npm:4.x": version: 4.1.2 resolution: "lodash.memoize@npm:4.1.2" checksum: 9ff3942feeccffa4f1fafa88d32f0d24fdc62fd15ded5a74a5f950ff5f0c6f61916157246744c620173dddf38d37095a92327d5fd3861e2063e736a5c207d089 languageName: node linkType: hard "lodash.merge@npm:^4.6.2": version: 4.6.2 resolution: "lodash.merge@npm:4.6.2" checksum: ad580b4bdbb7ca1f7abf7e1bce63a9a0b98e370cf40194b03380a46b4ed799c9573029599caebc1b14e3f24b111aef72b96674a56cfa105e0f5ac70546cdc005 languageName: node linkType: hard "lodash@npm:^4.17.15, lodash@npm:^4.17.4": version: 4.17.21 resolution: "lodash@npm:4.17.21" checksum: eb835a2e51d381e561e508ce932ea50a8e5a68f4ebdd771ea240d3048244a8d13658acbd502cd4829768c56f2e16bdd4340b9ea141297d472517b83868e677f7 languageName: node linkType: hard "log-update@npm:^4.0.0": version: 4.0.0 resolution: "log-update@npm:4.0.0" dependencies: ansi-escapes: ^4.3.0 cli-cursor: ^3.1.0 slice-ansi: ^4.0.0 wrap-ansi: ^6.2.0 checksum: ae2f85bbabc1906034154fb7d4c4477c79b3e703d22d78adee8b3862fa913942772e7fa11713e3d96fb46de4e3cabefbf5d0a544344f03b58d3c4bff52aa9eb2 languageName: node linkType: hard "log-update@npm:^5.0.1": version: 5.0.1 resolution: "log-update@npm:5.0.1" dependencies: ansi-escapes: ^5.0.0 cli-cursor: ^4.0.0 
slice-ansi: ^5.0.0 strip-ansi: ^7.0.1 wrap-ansi: ^8.0.1 checksum: 2c6b47dcce6f9233df6d232a37d9834cb3657a0749ef6398f1706118de74c55f158587d4128c225297ea66803f35c5ac3db4f3f617046d817233c45eedc32ef1 languageName: node linkType: hard "lru-cache@npm:^5.1.1": version: 5.1.1 resolution: "lru-cache@npm:5.1.1" dependencies: yallist: ^3.0.2 checksum: c154ae1cbb0c2206d1501a0e94df349653c92c8cbb25236d7e85190bcaf4567a03ac6eb43166fabfa36fd35623694da7233e88d9601fbf411a9a481d85dbd2cb languageName: node linkType: hard "lru-cache@npm:^6.0.0": version: 6.0.0 resolution: "lru-cache@npm:6.0.0" dependencies: yallist: ^4.0.0 checksum: f97f499f898f23e4585742138a22f22526254fdba6d75d41a1c2526b3b6cc5747ef59c5612ba7375f42aca4f8461950e925ba08c991ead0651b4918b7c978297 languageName: node linkType: hard "lru-cache@npm:^7.7.1": version: 7.18.3 resolution: "lru-cache@npm:7.18.3" checksum: e550d772384709deea3f141af34b6d4fa392e2e418c1498c078de0ee63670f1f46f5eee746e8ef7e69e1c895af0d4224e62ee33e66a543a14763b0f2e74c1356 languageName: node linkType: hard "lru-cache@npm:^9.1.1 || ^10.0.0": version: 10.0.1 resolution: "lru-cache@npm:10.0.1" checksum: 06f8d0e1ceabd76bb6f644a26dbb0b4c471b79c7b514c13c6856113879b3bf369eb7b497dad4ff2b7e2636db202412394865b33c332100876d838ad1372f0181 languageName: node linkType: hard "make-dir@npm:^4.0.0": version: 4.0.0 resolution: "make-dir@npm:4.0.0" dependencies: semver: ^7.5.3 checksum: bf0731a2dd3aab4db6f3de1585cea0b746bb73eb5a02e3d8d72757e376e64e6ada190b1eddcde5b2f24a81b688a9897efd5018737d05e02e2a671dda9cff8a8a languageName: node linkType: hard "make-error@npm:1.x": version: 1.3.6 resolution: "make-error@npm:1.3.6" checksum: b86e5e0e25f7f777b77fabd8e2cbf15737972869d852a22b7e73c17623928fccb826d8e46b9951501d3f20e51ad74ba8c59ed584f610526a48f8ccf88aaec402 languageName: node linkType: hard "make-fetch-happen@npm:^11.0.3": version: 11.1.1 resolution: "make-fetch-happen@npm:11.1.1" dependencies: agentkeepalive: ^4.2.1 cacache: ^17.0.0 http-cache-semantics: ^4.1.1 http-proxy-agent: ^5.0.0 https-proxy-agent: ^5.0.0 is-lambda: ^1.0.1 lru-cache: ^7.7.1 minipass: ^5.0.0 minipass-fetch: ^3.0.0 minipass-flush: ^1.0.5 minipass-pipeline: ^1.2.4 negotiator: ^0.6.3 promise-retry: ^2.0.1 socks-proxy-agent: ^7.0.0 ssri: ^10.0.0 checksum: 7268bf274a0f6dcf0343829489a4506603ff34bd0649c12058753900b0eb29191dce5dba12680719a5d0a983d3e57810f594a12f3c18494e93a1fbc6348a4540 languageName: node linkType: hard "makeerror@npm:1.0.12": version: 1.0.12 resolution: "makeerror@npm:1.0.12" dependencies: tmpl: 1.0.5 checksum: b38a025a12c8146d6eeea5a7f2bf27d51d8ad6064da8ca9405fcf7bf9b54acd43e3b30ddd7abb9b1bfa4ddb266019133313482570ddb207de568f71ecfcf6060 languageName: node linkType: hard "map-age-cleaner@npm:^0.1.3": version: 0.1.3 resolution: "map-age-cleaner@npm:0.1.3" dependencies: p-defer: ^1.0.0 checksum: cb2804a5bcb3cbdfe4b59066ea6d19f5e7c8c196cd55795ea4c28f792b192e4c442426ae52524e5e1acbccf393d3bddacefc3d41f803e66453f6c4eda3650bc1 languageName: node linkType: hard "matcher@npm:^5.0.0": version: 5.0.0 resolution: "matcher@npm:5.0.0" dependencies: escape-string-regexp: ^5.0.0 checksum: 28f191c2d23fee0f6f32fd0181d9fe173b0ab815a919edba55605438a2f9fa40372e002574a1b17add981b0a8669c75bc6194318d065ed2dceffd8b160c38118 languageName: node linkType: hard "md5-hex@npm:^3.0.1": version: 3.0.1 resolution: "md5-hex@npm:3.0.1" dependencies: blueimp-md5: ^2.10.0 checksum: 6799a19e8bdd3e0c2861b94c1d4d858a89220488d7885c1fa236797e367d0c2e5f2b789e05309307083503f85be3603a9686a5915568a473137d6b4117419cc2 languageName: node linkType: hard "mem@npm:^9.0.2": 
version: 9.0.2 resolution: "mem@npm:9.0.2" dependencies: map-age-cleaner: ^0.1.3 mimic-fn: ^4.0.0 checksum: 07829bb182af0e3ecf748dc2edb1c3b10a256ef10458f7e24d06561a2adc2b3ef34d14abe81678bbcedb46faa477e7370223f118b1a5e1252da5fe43496f3967 languageName: node linkType: hard "memorystream@npm:^0.3.1": version: 0.3.1 resolution: "memorystream@npm:0.3.1" checksum: f18b42440d24d09516d01466c06adf797df7873f0d40aa7db02e5fb9ed83074e5e65412d0720901d7069363465f82dc4f8bcb44f0cde271567a61426ce6ca2e9 languageName: node linkType: hard "merge-stream@npm:^2.0.0": version: 2.0.0 resolution: "merge-stream@npm:2.0.0" checksum: 6fa4dcc8d86629705cea944a4b88ef4cb0e07656ebf223fa287443256414283dd25d91c1cd84c77987f2aec5927af1a9db6085757cb43d90eb170ebf4b47f4f4 languageName: node linkType: hard "merge2@npm:^1.3.0, merge2@npm:^1.4.1": version: 1.4.1 resolution: "merge2@npm:1.4.1" checksum: 7268db63ed5169466540b6fb947aec313200bcf6d40c5ab722c22e242f651994619bcd85601602972d3c85bd2cc45a358a4c61937e9f11a061919a1da569b0c2 languageName: node linkType: hard "micromatch@npm:4.0.5, micromatch@npm:^4.0.4": version: 4.0.5 resolution: "micromatch@npm:4.0.5" dependencies: braces: ^3.0.2 picomatch: ^2.3.1 checksum: 02a17b671c06e8fefeeb6ef996119c1e597c942e632a21ef589154f23898c9c6a9858526246abb14f8bca6e77734aa9dcf65476fca47cedfb80d9577d52843fc languageName: node linkType: hard "mimic-fn@npm:^2.1.0": version: 2.1.0 resolution: "mimic-fn@npm:2.1.0" checksum: d2421a3444848ce7f84bd49115ddacff29c15745db73f54041edc906c14b131a38d05298dae3081667627a59b2eb1ca4b436ff2e1b80f69679522410418b478a languageName: node linkType: hard "mimic-fn@npm:^4.0.0": version: 4.0.0 resolution: "mimic-fn@npm:4.0.0" checksum: 995dcece15ee29aa16e188de6633d43a3db4611bcf93620e7e62109ec41c79c0f34277165b8ce5e361205049766e371851264c21ac64ca35499acb5421c2ba56 languageName: node linkType: hard "minimatch@npm:^3.0.4, minimatch@npm:^3.0.5, minimatch@npm:^3.1.2": version: 3.1.2 resolution: "minimatch@npm:3.1.2" dependencies: brace-expansion: ^1.1.7 checksum: c154e566406683e7bcb746e000b84d74465b3a832c45d59912b9b55cd50dee66e5c4b1e5566dba26154040e51672f9aa450a9aef0c97cfc7336b78b7afb9540a languageName: node linkType: hard "minimatch@npm:^9.0.1": version: 9.0.3 resolution: "minimatch@npm:9.0.3" dependencies: brace-expansion: ^2.0.1 checksum: 253487976bf485b612f16bf57463520a14f512662e592e95c571afdab1442a6a6864b6c88f248ce6fc4ff0b6de04ac7aa6c8bb51e868e99d1d65eb0658a708b5 languageName: node linkType: hard "minimist@npm:^1.2.0, minimist@npm:^1.2.6": version: 1.2.6 resolution: "minimist@npm:1.2.6" checksum: d15428cd1e11eb14e1233bcfb88ae07ed7a147de251441d61158619dfb32c4d7e9061d09cab4825fdee18ecd6fce323228c8c47b5ba7cd20af378ca4048fb3fb languageName: node linkType: hard "minipass-collect@npm:^1.0.2": version: 1.0.2 resolution: "minipass-collect@npm:1.0.2" dependencies: minipass: ^3.0.0 checksum: 14df761028f3e47293aee72888f2657695ec66bd7d09cae7ad558da30415fdc4752bbfee66287dcc6fd5e6a2fa3466d6c484dc1cbd986525d9393b9523d97f10 languageName: node linkType: hard "minipass-fetch@npm:^3.0.0": version: 3.0.4 resolution: "minipass-fetch@npm:3.0.4" dependencies: encoding: ^0.1.13 minipass: ^7.0.3 minipass-sized: ^1.0.3 minizlib: ^2.1.2 dependenciesMeta: encoding: optional: true checksum: af7aad15d5c128ab1ebe52e043bdf7d62c3c6f0cecb9285b40d7b395e1375b45dcdfd40e63e93d26a0e8249c9efd5c325c65575aceee192883970ff8cb11364a languageName: node linkType: hard "minipass-flush@npm:^1.0.5": version: 1.0.5 resolution: "minipass-flush@npm:1.0.5" dependencies: minipass: ^3.0.0 checksum: 
56269a0b22bad756a08a94b1ffc36b7c9c5de0735a4dd1ab2b06c066d795cfd1f0ac44a0fcae13eece5589b908ecddc867f04c745c7009be0b566421ea0944cf languageName: node linkType: hard "minipass-pipeline@npm:^1.2.4": version: 1.2.4 resolution: "minipass-pipeline@npm:1.2.4" dependencies: minipass: ^3.0.0 checksum: b14240dac0d29823c3d5911c286069e36d0b81173d7bdf07a7e4a91ecdef92cdff4baaf31ea3746f1c61e0957f652e641223970870e2353593f382112257971b languageName: node linkType: hard "minipass-sized@npm:^1.0.3": version: 1.0.3 resolution: "minipass-sized@npm:1.0.3" dependencies: minipass: ^3.0.0 checksum: 79076749fcacf21b5d16dd596d32c3b6bf4d6e62abb43868fac21674078505c8b15eaca4e47ed844985a4514854f917d78f588fcd029693709417d8f98b2bd60 languageName: node linkType: hard "minipass@npm:^3.0.0": version: 3.3.6 resolution: "minipass@npm:3.3.6" dependencies: yallist: ^4.0.0 checksum: a30d083c8054cee83cdcdc97f97e4641a3f58ae743970457b1489ce38ee1167b3aaf7d815cd39ec7a99b9c40397fd4f686e83750e73e652b21cb516f6d845e48 languageName: node linkType: hard "minipass@npm:^5.0.0": version: 5.0.0 resolution: "minipass@npm:5.0.0" checksum: 425dab288738853fded43da3314a0b5c035844d6f3097a8e3b5b29b328da8f3c1af6fc70618b32c29ff906284cf6406b6841376f21caaadd0793c1d5a6a620ea languageName: node linkType: hard "minipass@npm:^5.0.0 || ^6.0.2 || ^7.0.0, minipass@npm:^7.0.3": version: 7.0.3 resolution: "minipass@npm:7.0.3" checksum: 6f1614f5b5b55568a46bca5fec0e7c46dac027691db27d0e1923a8192866903144cd962ac772c0e9f89b608ea818b702709c042bce98e190d258847d85461531 languageName: node linkType: hard "minizlib@npm:^2.1.1, minizlib@npm:^2.1.2": version: 2.1.2 resolution: "minizlib@npm:2.1.2" dependencies: minipass: ^3.0.0 yallist: ^4.0.0 checksum: f1fdeac0b07cf8f30fcf12f4b586795b97be856edea22b5e9072707be51fc95d41487faec3f265b42973a304fe3a64acd91a44a3826a963e37b37bafde0212c3 languageName: node linkType: hard "mkdirp@npm:^1.0.3": version: 1.0.4 resolution: "mkdirp@npm:1.0.4" bin: mkdirp: bin/cmd.js checksum: a96865108c6c3b1b8e1d5e9f11843de1e077e57737602de1b82030815f311be11f96f09cce59bd5b903d0b29834733e5313f9301e3ed6d6f6fba2eae0df4298f languageName: node linkType: hard "ms@npm:2.1.2, ms@npm:^2.1.1": version: 2.1.2 resolution: "ms@npm:2.1.2" checksum: 673cdb2c3133eb050c745908d8ce632ed2c02d85640e2edb3ace856a2266a813b30c613569bf3354fdf4ea7d1a1494add3bfa95e2713baa27d0c2c71fc44f58f languageName: node linkType: hard "ms@npm:^2.0.0, ms@npm:^2.1.3": version: 2.1.3 resolution: "ms@npm:2.1.3" checksum: aa92de608021b242401676e35cfa5aa42dd70cbdc082b916da7fb925c542173e36bce97ea3e804923fe92c0ad991434e4a38327e15a1b5b5f945d66df615ae6d languageName: node linkType: hard "natural-compare-lite@npm:^1.4.0": version: 1.4.0 resolution: "natural-compare-lite@npm:1.4.0" checksum: 5222ac3986a2b78dd6069ac62cbb52a7bf8ffc90d972ab76dfe7b01892485d229530ed20d0c62e79a6b363a663b273db3bde195a1358ce9e5f779d4453887225 languageName: node linkType: hard "natural-compare@npm:^1.4.0": version: 1.4.0 resolution: "natural-compare@npm:1.4.0" checksum: 23ad088b08f898fc9b53011d7bb78ec48e79de7627e01ab5518e806033861bef68d5b0cd0e2205c2f36690ac9571ff6bcb05eb777ced2eeda8d4ac5b44592c3d languageName: node linkType: hard "negotiator@npm:^0.6.3": version: 0.6.3 resolution: "negotiator@npm:0.6.3" checksum: b8ffeb1e262eff7968fc90a2b6767b04cfd9842582a9d0ece0af7049537266e7b2506dfb1d107a32f06dd849ab2aea834d5830f7f4d0e5cb7d36e1ae55d021d9 languageName: node linkType: hard "nice-try@npm:^1.0.4": version: 1.0.5 resolution: "nice-try@npm:1.0.5" checksum: 
0b4af3b5bb5d86c289f7a026303d192a7eb4417231fe47245c460baeabae7277bcd8fd9c728fb6bd62c30b3e15cd6620373e2cf33353b095d8b403d3e8a15aff languageName: node linkType: hard "node-gyp@npm:latest": version: 9.4.0 resolution: "node-gyp@npm:9.4.0" dependencies: env-paths: ^2.2.0 exponential-backoff: ^3.1.1 glob: ^7.1.4 graceful-fs: ^4.2.6 make-fetch-happen: ^11.0.3 nopt: ^6.0.0 npmlog: ^6.0.0 rimraf: ^3.0.2 semver: ^7.3.5 tar: ^6.1.2 which: ^2.0.2 bin: node-gyp: bin/node-gyp.js checksum: 78b404e2e0639d64e145845f7f5a3cb20c0520cdaf6dda2f6e025e9b644077202ea7de1232396ba5bde3fee84cdc79604feebe6ba3ec84d464c85d407bb5da99 languageName: node linkType: hard "node-int64@npm:^0.4.0": version: 0.4.0 resolution: "node-int64@npm:0.4.0" checksum: d0b30b1ee6d961851c60d5eaa745d30b5c95d94bc0e74b81e5292f7c42a49e3af87f1eb9e89f59456f80645d679202537de751b7d72e9e40ceea40c5e449057e languageName: node linkType: hard "node-releases@npm:^2.0.13": version: 2.0.13 resolution: "node-releases@npm:2.0.13" checksum: 17ec8f315dba62710cae71a8dad3cd0288ba943d2ece43504b3b1aa8625bf138637798ab470b1d9035b0545996f63000a8a926e0f6d35d0996424f8b6d36dda3 languageName: node linkType: hard "nofilter@npm:^3.1.0": version: 3.1.0 resolution: "nofilter@npm:3.1.0" checksum: 58aa85a5b4b35cbb6e42de8a8591c5e338061edc9f3e7286f2c335e9e9b9b8fa7c335ae45daa8a1f3433164dc0b9a3d187fa96f9516e04a17a1f9ce722becc4f languageName: node linkType: hard "nopt@npm:^6.0.0": version: 6.0.0 resolution: "nopt@npm:6.0.0" dependencies: abbrev: ^1.0.0 bin: nopt: bin/nopt.js checksum: 82149371f8be0c4b9ec2f863cc6509a7fd0fa729929c009f3a58e4eb0c9e4cae9920e8f1f8eb46e7d032fec8fb01bede7f0f41a67eb3553b7b8e14fa53de1dac languageName: node linkType: hard "normalize-package-data@npm:^2.3.2": version: 2.5.0 resolution: "normalize-package-data@npm:2.5.0" dependencies: hosted-git-info: ^2.1.4 resolve: ^1.10.0 semver: 2 || 3 || 4 || 5 validate-npm-package-license: ^3.0.1 checksum: 7999112efc35a6259bc22db460540cae06564aa65d0271e3bdfa86876d08b0e578b7b5b0028ee61b23f1cae9fc0e7847e4edc0948d3068a39a2a82853efc8499 languageName: node linkType: hard "normalize-path@npm:^3.0.0, normalize-path@npm:~3.0.0": version: 3.0.0 resolution: "normalize-path@npm:3.0.0" checksum: 88eeb4da891e10b1318c4b2476b6e2ecbeb5ff97d946815ffea7794c31a89017c70d7f34b3c2ebf23ef4e9fc9fb99f7dffe36da22011b5b5c6ffa34f4873ec20 languageName: node linkType: hard "npm-run-all@npm:^4.1.5": version: 4.1.5 resolution: "npm-run-all@npm:4.1.5" dependencies: ansi-styles: ^3.2.1 chalk: ^2.4.1 cross-spawn: ^6.0.5 memorystream: ^0.3.1 minimatch: ^3.0.4 pidtree: ^0.3.0 read-pkg: ^3.0.0 shell-quote: ^1.6.1 string.prototype.padend: ^3.0.0 bin: npm-run-all: bin/npm-run-all/index.js run-p: bin/run-p/index.js run-s: bin/run-s/index.js checksum: 373b72c6a36564da13c1642c1fd9bb4dcc756bce7a3648f883772f02661095319820834ff813762d2fee403e9b40c1cd27c8685807c107440f10eb19c006d4a0 languageName: node linkType: hard "npm-run-path@npm:^4.0.1": version: 4.0.1 resolution: "npm-run-path@npm:4.0.1" dependencies: path-key: ^3.0.0 checksum: 5374c0cea4b0bbfdfae62da7bbdf1e1558d338335f4cacf2515c282ff358ff27b2ecb91ffa5330a8b14390ac66a1e146e10700440c1ab868208430f56b5f4d23 languageName: node linkType: hard "npm-run-path@npm:^5.1.0": version: 5.1.0 resolution: "npm-run-path@npm:5.1.0" dependencies: path-key: ^4.0.0 checksum: dc184eb5ec239d6a2b990b43236845332ef12f4e0beaa9701de724aa797fe40b6bbd0157fb7639d24d3ab13f5d5cf22d223a19c6300846b8126f335f788bee66 languageName: node linkType: hard "npmlog@npm:^6.0.0": version: 6.0.2 resolution: "npmlog@npm:6.0.2" dependencies: are-we-there-yet: 
^3.0.0 console-control-strings: ^1.1.0 gauge: ^4.0.3 set-blocking: ^2.0.0 checksum: ae238cd264a1c3f22091cdd9e2b106f684297d3c184f1146984ecbe18aaa86343953f26b9520dedd1b1372bc0316905b736c1932d778dbeb1fcf5a1001390e2a languageName: node linkType: hard "object-inspect@npm:^1.12.3, object-inspect@npm:^1.9.0": version: 1.12.3 resolution: "object-inspect@npm:1.12.3" checksum: dabfd824d97a5f407e6d5d24810d888859f6be394d8b733a77442b277e0808860555176719c5905e765e3743a7cada6b8b0a3b85e5331c530fd418cc8ae991db languageName: node linkType: hard "object-keys@npm:^1.1.1": version: 1.1.1 resolution: "object-keys@npm:1.1.1" checksum: b363c5e7644b1e1b04aa507e88dcb8e3a2f52b6ffd0ea801e4c7a62d5aa559affe21c55a07fd4b1fd55fc03a33c610d73426664b20032405d7b92a1414c34d6a languageName: node linkType: hard "object.assign@npm:^4.1.4": version: 4.1.4 resolution: "object.assign@npm:4.1.4" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 has-symbols: ^1.0.3 object-keys: ^1.1.1 checksum: 76cab513a5999acbfe0ff355f15a6a125e71805fcf53de4e9d4e082e1989bdb81d1e329291e1e4e0ae7719f0e4ef80e88fb2d367ae60500d79d25a6224ac8864 languageName: node linkType: hard "object.fromentries@npm:^2.0.6": version: 2.0.6 resolution: "object.fromentries@npm:2.0.6" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 es-abstract: ^1.20.4 checksum: 453c6d694180c0c30df451b60eaf27a5b9bca3fb43c37908fd2b78af895803dc631242bcf05582173afa40d8d0e9c96e16e8874b39471aa53f3ac1f98a085d85 languageName: node linkType: hard "object.groupby@npm:^1.0.0": version: 1.0.0 resolution: "object.groupby@npm:1.0.0" dependencies: call-bind: ^1.0.2 define-properties: ^1.2.0 es-abstract: ^1.21.2 get-intrinsic: ^1.2.1 checksum: 64b00b287d57580111c958e7ff375c9b61811fa356f2cf0d35372d43cab61965701f00fac66c19fd8f49c4dfa28744bee6822379c69a73648ad03e09fcdeae70 languageName: node linkType: hard "object.values@npm:^1.1.6": version: 1.1.6 resolution: "object.values@npm:1.1.6" dependencies: call-bind: ^1.0.2 define-properties: ^1.1.4 es-abstract: ^1.20.4 checksum: f6fff9fd817c24cfd8107f50fb33061d81cd11bacc4e3dbb3852e9ff7692fde4dbce823d4333ea27cd9637ef1b6690df5fbb61f1ed314fa2959598dc3ae23d8e languageName: node linkType: hard "once@npm:^1.3.0": version: 1.4.0 resolution: "once@npm:1.4.0" dependencies: wrappy: 1 checksum: cd0a88501333edd640d95f0d2700fbde6bff20b3d4d9bdc521bdd31af0656b5706570d6c6afe532045a20bb8dc0849f8332d6f2a416e0ba6d3d3b98806c7db68 languageName: node linkType: hard "onetime@npm:^5.1.0, onetime@npm:^5.1.2": version: 5.1.2 resolution: "onetime@npm:5.1.2" dependencies: mimic-fn: ^2.1.0 checksum: 2478859ef817fc5d4e9c2f9e5728512ddd1dbc9fb7829ad263765bb6d3b91ce699d6e2332eef6b7dff183c2f490bd3349f1666427eaba4469fba0ac38dfd0d34 languageName: node linkType: hard "onetime@npm:^6.0.0": version: 6.0.0 resolution: "onetime@npm:6.0.0" dependencies: mimic-fn: ^4.0.0 checksum: 0846ce78e440841335d4e9182ef69d5762e9f38aa7499b19f42ea1c4cd40f0b4446094c455c713f9adac3f4ae86f613bb5e30c99e52652764d06a89f709b3788 languageName: node linkType: hard "optionator@npm:^0.9.3": version: 0.9.3 resolution: "optionator@npm:0.9.3" dependencies: "@aashutoshrathi/word-wrap": ^1.2.3 deep-is: ^0.1.3 fast-levenshtein: ^2.0.6 levn: ^0.4.1 prelude-ls: ^1.2.1 type-check: ^0.4.0 checksum: 09281999441f2fe9c33a5eeab76700795365a061563d66b098923eb719251a42bdbe432790d35064d0816ead9296dbeb1ad51a733edf4167c96bd5d0882e428a languageName: node linkType: hard "p-defer@npm:^1.0.0": version: 1.0.0 resolution: "p-defer@npm:1.0.0" checksum: 
4271b935c27987e7b6f229e5de4cdd335d808465604644cb7b4c4c95bef266735859a93b16415af8a41fd663ee9e3b97a1a2023ca9def613dba1bad2a0da0c7b languageName: node linkType: hard "p-event@npm:^5.0.1": version: 5.0.1 resolution: "p-event@npm:5.0.1" dependencies: p-timeout: ^5.0.2 checksum: 3bdd8df6092e6b149f25e9c2eb1c0843b3b4279b07be2a2c72c02b65b267a8908c2040fefd606f2497b0f2bcefcd214f8ca5a74f0c883515d400ccf1d88d5683 languageName: node linkType: hard "p-limit@npm:^2.2.0": version: 2.3.0 resolution: "p-limit@npm:2.3.0" dependencies: p-try: ^2.0.0 checksum: 84ff17f1a38126c3314e91ecfe56aecbf36430940e2873dadaa773ffe072dc23b7af8e46d4b6485d302a11673fe94c6b67ca2cfbb60c989848b02100d0594ac1 languageName: node linkType: hard "p-limit@npm:^3.0.2, p-limit@npm:^3.1.0": version: 3.1.0 resolution: "p-limit@npm:3.1.0" dependencies: yocto-queue: ^0.1.0 checksum: 7c3690c4dbf62ef625671e20b7bdf1cbc9534e83352a2780f165b0d3ceba21907e77ad63401708145ca4e25bfc51636588d89a8c0aeb715e6c37d1c066430360 languageName: node linkType: hard "p-limit@npm:^4.0.0": version: 4.0.0 resolution: "p-limit@npm:4.0.0" dependencies: yocto-queue: ^1.0.0 checksum: 01d9d70695187788f984226e16c903475ec6a947ee7b21948d6f597bed788e3112cc7ec2e171c1d37125057a5f45f3da21d8653e04a3a793589e12e9e80e756b languageName: node linkType: hard "p-locate@npm:^4.1.0": version: 4.1.0 resolution: "p-locate@npm:4.1.0" dependencies: p-limit: ^2.2.0 checksum: 513bd14a455f5da4ebfcb819ef706c54adb09097703de6aeaa5d26fe5ea16df92b48d1ac45e01e3944ce1e6aa2a66f7f8894742b8c9d6e276e16cd2049a2b870 languageName: node linkType: hard "p-locate@npm:^5.0.0": version: 5.0.0 resolution: "p-locate@npm:5.0.0" dependencies: p-limit: ^3.0.2 checksum: 1623088f36cf1cbca58e9b61c4e62bf0c60a07af5ae1ca99a720837356b5b6c5ba3eb1b2127e47a06865fee59dd0453cad7cc844cda9d5a62ac1a5a51b7c86d3 languageName: node linkType: hard "p-locate@npm:^6.0.0": version: 6.0.0 resolution: "p-locate@npm:6.0.0" dependencies: p-limit: ^4.0.0 checksum: 2bfe5234efa5e7a4e74b30a5479a193fdd9236f8f6b4d2f3f69e3d286d9a7d7ab0c118a2a50142efcf4e41625def635bd9332d6cbf9cc65d85eb0718c579ab38 languageName: node linkType: hard "p-map@npm:^4.0.0": version: 4.0.0 resolution: "p-map@npm:4.0.0" dependencies: aggregate-error: ^3.0.0 checksum: cb0ab21ec0f32ddffd31dfc250e3afa61e103ef43d957cc45497afe37513634589316de4eb88abdfd969fe6410c22c0b93ab24328833b8eb1ccc087fc0442a1c languageName: node linkType: hard "p-map@npm:^5.5.0": version: 5.5.0 resolution: "p-map@npm:5.5.0" dependencies: aggregate-error: ^4.0.0 checksum: 065cb6fca6b78afbd070dd9224ff160dc23eea96e57863c09a0c8ea7ce921043f76854be7ee0abc295cff1ac9adcf700e79a1fbe3b80b625081087be58e7effb languageName: node linkType: hard "p-timeout@npm:^5.0.2": version: 5.1.0 resolution: "p-timeout@npm:5.1.0" checksum: f5cd4e17301ff1ff1d8dbf2817df0ad88c6bba99349fc24d8d181827176ad4f8aca649190b8a5b1a428dfd6ddc091af4606835d3e0cb0656e04045da5c9e270c languageName: node linkType: hard "p-try@npm:^2.0.0": version: 2.2.0 resolution: "p-try@npm:2.2.0" checksum: f8a8e9a7693659383f06aec604ad5ead237c7a261c18048a6e1b5b85a5f8a067e469aa24f5bc009b991ea3b058a87f5065ef4176793a200d4917349881216cae languageName: node linkType: hard "parent-module@npm:^1.0.0": version: 1.0.1 resolution: "parent-module@npm:1.0.1" dependencies: callsites: ^3.0.0 checksum: 6ba8b255145cae9470cf5551eb74be2d22281587af787a2626683a6c20fbb464978784661478dd2a3f1dad74d1e802d403e1b03c1a31fab310259eec8ac560ff languageName: node linkType: hard "parse-json@npm:^4.0.0": version: 4.0.0 resolution: "parse-json@npm:4.0.0" dependencies: error-ex: ^1.3.1 json-parse-better-errors: 
^1.0.1 checksum: 0fe227d410a61090c247e34fa210552b834613c006c2c64d9a05cfe9e89cf8b4246d1246b1a99524b53b313e9ac024438d0680f67e33eaed7e6f38db64cfe7b5 languageName: node linkType: hard "parse-json@npm:^5.2.0": version: 5.2.0 resolution: "parse-json@npm:5.2.0" dependencies: "@babel/code-frame": ^7.0.0 error-ex: ^1.3.1 json-parse-even-better-errors: ^2.3.0 lines-and-columns: ^1.1.6 checksum: 62085b17d64da57f40f6afc2ac1f4d95def18c4323577e1eced571db75d9ab59b297d1d10582920f84b15985cbfc6b6d450ccbf317644cfa176f3ed982ad87e2 languageName: node linkType: hard "parse-ms@npm:^3.0.0": version: 3.0.0 resolution: "parse-ms@npm:3.0.0" checksum: fc602bba093835562321a67a9d6c8c9687ca4f26a09459a77e07ebd7efddd1a5766725ec60eb0c83a2abe67f7a23808f7deb1c1226727776eaf7f9607ae09db2 languageName: node linkType: hard "path-exists@npm:^4.0.0": version: 4.0.0 resolution: "path-exists@npm:4.0.0" checksum: 505807199dfb7c50737b057dd8d351b82c033029ab94cb10a657609e00c1bc53b951cfdbccab8de04c5584d5eff31128ce6afd3db79281874a5ef2adbba55ed1 languageName: node linkType: hard "path-exists@npm:^5.0.0": version: 5.0.0 resolution: "path-exists@npm:5.0.0" checksum: 8ca842868cab09423994596eb2c5ec2a971c17d1a3cb36dbf060592c730c725cd524b9067d7d2a1e031fef9ba7bd2ac6dc5ec9fb92aa693265f7be3987045254 languageName: node linkType: hard "path-is-absolute@npm:^1.0.0": version: 1.0.1 resolution: "path-is-absolute@npm:1.0.1" checksum: 060840f92cf8effa293bcc1bea81281bd7d363731d214cbe5c227df207c34cd727430f70c6037b5159c8a870b9157cba65e775446b0ab06fd5ecc7e54615a3b8 languageName: node linkType: hard "path-key@npm:^2.0.1": version: 2.0.1 resolution: "path-key@npm:2.0.1" checksum: f7ab0ad42fe3fb8c7f11d0c4f849871e28fbd8e1add65c370e422512fc5887097b9cf34d09c1747d45c942a8c1e26468d6356e2df3f740bf177ab8ca7301ebfd languageName: node linkType: hard "path-key@npm:^3.0.0, path-key@npm:^3.1.0": version: 3.1.1 resolution: "path-key@npm:3.1.1" checksum: 55cd7a9dd4b343412a8386a743f9c746ef196e57c823d90ca3ab917f90ab9f13dd0ded27252ba49dbdfcab2b091d998bc446f6220cd3cea65db407502a740020 languageName: node linkType: hard "path-key@npm:^4.0.0": version: 4.0.0 resolution: "path-key@npm:4.0.0" checksum: 8e6c314ae6d16b83e93032c61020129f6f4484590a777eed709c4a01b50e498822b00f76ceaf94bc64dbd90b327df56ceadce27da3d83393790f1219e07721d7 languageName: node linkType: hard "path-parse@npm:^1.0.7": version: 1.0.7 resolution: "path-parse@npm:1.0.7" checksum: 49abf3d81115642938a8700ec580da6e830dde670be21893c62f4e10bd7dd4c3742ddc603fe24f898cba7eb0c6bc1777f8d9ac14185d34540c6d4d80cd9cae8a languageName: node linkType: hard "path-scurry@npm:^1.10.1": version: 1.10.1 resolution: "path-scurry@npm:1.10.1" dependencies: lru-cache: ^9.1.1 || ^10.0.0 minipass: ^5.0.0 || ^6.0.2 || ^7.0.0 checksum: e2557cff3a8fb8bc07afdd6ab163a92587884f9969b05bbbaf6fe7379348bfb09af9ed292af12ed32398b15fb443e81692047b786d1eeb6d898a51eb17ed7d90 languageName: node linkType: hard "path-type@npm:^3.0.0": version: 3.0.0 resolution: "path-type@npm:3.0.0" dependencies: pify: ^3.0.0 checksum: 735b35e256bad181f38fa021033b1c33cfbe62ead42bb2222b56c210e42938eecb272ae1949f3b6db4ac39597a61b44edd8384623ec4d79bfdc9a9c0f12537a6 languageName: node linkType: hard "path-type@npm:^4.0.0": version: 4.0.0 resolution: "path-type@npm:4.0.0" checksum: 5b1e2daa247062061325b8fdbfd1fb56dde0a448fb1455453276ea18c60685bdad23a445dc148cf87bc216be1573357509b7d4060494a6fd768c7efad833ee45 languageName: node linkType: hard "picocolors@npm:^1.0.0": version: 1.0.0 resolution: "picocolors@npm:1.0.0" checksum: 
a2e8092dd86c8396bdba9f2b5481032848525b3dc295ce9b57896f931e63fc16f79805144321f72976383fc249584672a75cc18d6777c6b757603f372f745981 languageName: node linkType: hard "picomatch@npm:^2.0.4, picomatch@npm:^2.2.1, picomatch@npm:^2.2.3, picomatch@npm:^2.3.1": version: 2.3.1 resolution: "picomatch@npm:2.3.1" checksum: 050c865ce81119c4822c45d3c84f1ced46f93a0126febae20737bd05ca20589c564d6e9226977df859ed5e03dc73f02584a2b0faad36e896936238238b0446cf languageName: node linkType: hard "pidtree@npm:0.6.0": version: 0.6.0 resolution: "pidtree@npm:0.6.0" bin: pidtree: bin/pidtree.js checksum: 8fbc073ede9209dd15e80d616e65eb674986c93be49f42d9ddde8dbbd141bb53d628a7ca4e58ab5c370bb00383f67d75df59a9a226dede8fa801267a7030c27a languageName: node linkType: hard "pidtree@npm:^0.3.0": version: 0.3.1 resolution: "pidtree@npm:0.3.1" bin: pidtree: bin/pidtree.js checksum: eb49025099f1af89a4696f7673351421f13420f3397b963c901fe23a1c9c2ff50f4750321970d4472c0ffbb065e4a6c3c27f75e226cc62284b19e21d32ce7012 languageName: node linkType: hard "pify@npm:^3.0.0": version: 3.0.0 resolution: "pify@npm:3.0.0" checksum: 6cdcbc3567d5c412450c53261a3f10991665d660961e06605decf4544a61a97a54fefe70a68d5c37080ff9d6f4cf51444c90198d1ba9f9309a6c0d6e9f5c4fde languageName: node linkType: hard "pirates@npm:^4.0.4, pirates@npm:^4.0.5": version: 4.0.6 resolution: "pirates@npm:4.0.6" checksum: 46a65fefaf19c6f57460388a5af9ab81e3d7fd0e7bc44ca59d753cb5c4d0df97c6c6e583674869762101836d68675f027d60f841c105d72734df9dfca97cbcc6 languageName: node linkType: hard "pkg-conf@npm:^4.0.0": version: 4.0.0 resolution: "pkg-conf@npm:4.0.0" dependencies: find-up: ^6.0.0 load-json-file: ^7.0.0 checksum: 6da0c064a74f6c7ae80d7d68c5853e14f7e762a2a80c6ca9e0aa827002b90b69c86fefe3bac830b10a6f1739e7f96a1f728637f2a141e50b0fdafe92a2c3eab6 languageName: node linkType: hard "pkg-dir@npm:^4.2.0": version: 4.2.0 resolution: "pkg-dir@npm:4.2.0" dependencies: find-up: ^4.0.0 checksum: 9863e3f35132bf99ae1636d31ff1e1e3501251d480336edb1c211133c8d58906bed80f154a1d723652df1fda91e01c7442c2eeaf9dc83157c7ae89087e43c8d6 languageName: node linkType: hard "platform@npm:^1.3.3": version: 1.3.6 resolution: "platform@npm:1.3.6" checksum: 6f472a09c61d418c7e26c1c16d0bdc029549d512dbec6526216a1e59ec68100d07007d0097dcba69dddad883d6f2a83361b4bdfe0094a3d9a2af24158643d85e languageName: node linkType: hard "plur@npm:^5.1.0": version: 5.1.0 resolution: "plur@npm:5.1.0" dependencies: irregular-plurals: ^3.3.0 checksum: 57e400dc4b926768fb0abab7f8688fe17e85673712134546e7beaaee188bae7e0504976e847d7e41d0d6103ff2fd61204095f03c2a45de19a8bad15aecb45cc1 languageName: node linkType: hard "prelude-ls@npm:^1.2.1": version: 1.2.1 resolution: "prelude-ls@npm:1.2.1" checksum: cd192ec0d0a8e4c6da3bb80e4f62afe336df3f76271ac6deb0e6a36187133b6073a19e9727a1ff108cd8b9982e4768850d413baa71214dd80c7979617dca827a languageName: node linkType: hard "prettier-linter-helpers@npm:^1.0.0": version: 1.0.0 resolution: "prettier-linter-helpers@npm:1.0.0" dependencies: fast-diff: ^1.1.2 checksum: 00ce8011cf6430158d27f9c92cfea0a7699405633f7f1d4a45f07e21bf78e99895911cbcdc3853db3a824201a7c745bd49bfea8abd5fb9883e765a90f74f8392 languageName: node linkType: hard "prettier@npm:^2.8.3": version: 2.8.8 resolution: "prettier@npm:2.8.8" bin: prettier: bin-prettier.js checksum: b49e409431bf129dd89238d64299ba80717b57ff5a6d1c1a8b1a28b590d998a34e083fa13573bc732bb8d2305becb4c9a4407f8486c81fa7d55100eb08263cf8 languageName: node linkType: hard "pretty-format@npm:^29.0.0, pretty-format@npm:^29.6.3": version: 29.6.3 resolution: "pretty-format@npm:29.6.3" 
dependencies: "@jest/schemas": ^29.6.3 ansi-styles: ^5.0.0 react-is: ^18.0.0 checksum: 4e1c0db48e65571c22e80ff92123925ff8b3a2a89b71c3a1683cfde711004d492de32fe60c6bc10eea8bf6c678e5cbe544ac6c56cb8096e1eb7caf856928b1c4 languageName: node linkType: hard "pretty-ms@npm:^8.0.0": version: 8.0.0 resolution: "pretty-ms@npm:8.0.0" dependencies: parse-ms: ^3.0.0 checksum: b7d2a8182887af0e5ab93f9df331f10db9b8eda86855e2de115eb01a6c501bde5631a8813b1b0abdd7d045e79b08ae875369a8fd279a3dacd6d9e572bdd3bfa6 languageName: node linkType: hard "promise-retry@npm:^2.0.1": version: 2.0.1 resolution: "promise-retry@npm:2.0.1" dependencies: err-code: ^2.0.2 retry: ^0.12.0 checksum: f96a3f6d90b92b568a26f71e966cbbc0f63ab85ea6ff6c81284dc869b41510e6cdef99b6b65f9030f0db422bf7c96652a3fff9f2e8fb4a0f069d8f4430359429 languageName: node linkType: hard "prompts@npm:^2.0.1": version: 2.4.2 resolution: "prompts@npm:2.4.2" dependencies: kleur: ^3.0.3 sisteransi: ^1.0.5 checksum: d8fd1fe63820be2412c13bfc5d0a01909acc1f0367e32396962e737cb2fc52d004f3302475d5ce7d18a1e8a79985f93ff04ee03007d091029c3f9104bffc007d languageName: node linkType: hard "punycode@npm:^2.1.0": version: 2.3.0 resolution: "punycode@npm:2.3.0" checksum: 39f760e09a2a3bbfe8f5287cf733ecdad69d6af2fe6f97ca95f24b8921858b91e9ea3c9eeec6e08cede96181b3bb33f95c6ffd8c77e63986508aa2e8159fa200 languageName: node linkType: hard "pure-rand@npm:^6.0.0": version: 6.0.2 resolution: "pure-rand@npm:6.0.2" checksum: 79de33876a4f515d759c48e98d00756bbd916b4ea260cc572d7adfa4b62cace9952e89f0241d0410214554503d25061140fe325c66f845213d2b1728ba8d413e languageName: node linkType: hard "queue-microtask@npm:^1.2.2": version: 1.2.3 resolution: "queue-microtask@npm:1.2.3" checksum: b676f8c040cdc5b12723ad2f91414d267605b26419d5c821ff03befa817ddd10e238d22b25d604920340fd73efd8ba795465a0377c4adf45a4a41e4234e42dc4 languageName: node linkType: hard "react-is@npm:^18.0.0": version: 18.2.0 resolution: "react-is@npm:18.2.0" checksum: e72d0ba81b5922759e4aff17e0252bd29988f9642ed817f56b25a3e217e13eea8a7f2322af99a06edb779da12d5d636e9fda473d620df9a3da0df2a74141d53e languageName: node linkType: hard "read-pkg@npm:^3.0.0": version: 3.0.0 resolution: "read-pkg@npm:3.0.0" dependencies: load-json-file: ^4.0.0 normalize-package-data: ^2.3.2 path-type: ^3.0.0 checksum: 398903ebae6c7e9965419a1062924436cc0b6f516c42c4679a90290d2f87448ed8f977e7aa2dbba4aa1ac09248628c43e493ac25b2bc76640e946035200e34c6 languageName: node linkType: hard "readable-stream@npm:^3.6.0": version: 3.6.2 resolution: "readable-stream@npm:3.6.2" dependencies: inherits: ^2.0.3 string_decoder: ^1.1.1 util-deprecate: ^1.0.1 checksum: bdcbe6c22e846b6af075e32cf8f4751c2576238c5043169a1c221c92ee2878458a816a4ea33f4c67623c0b6827c8a400409bfb3cf0bf3381392d0b1dfb52ac8d languageName: node linkType: hard "readdirp@npm:~3.6.0": version: 3.6.0 resolution: "readdirp@npm:3.6.0" dependencies: picomatch: ^2.2.1 checksum: 1ced032e6e45670b6d7352d71d21ce7edf7b9b928494dcaba6f11fba63180d9da6cd7061ebc34175ffda6ff529f481818c962952004d273178acd70f7059b320 languageName: node linkType: hard "regexp.prototype.flags@npm:^1.5.0": version: 1.5.0 resolution: "regexp.prototype.flags@npm:1.5.0" dependencies: call-bind: ^1.0.2 define-properties: ^1.2.0 functions-have-names: ^1.2.3 checksum: c541687cdbdfff1b9a07f6e44879f82c66bbf07665f9a7544c5fd16acdb3ec8d1436caab01662d2fbcad403f3499d49ab0b77fbc7ef29ef961d98cc4bc9755b4 languageName: node linkType: hard "require-directory@npm:^2.1.1": version: 2.1.1 resolution: "require-directory@npm:2.1.1" checksum: 
fb47e70bf0001fdeabdc0429d431863e9475e7e43ea5f94ad86503d918423c1543361cc5166d713eaa7029dd7a3d34775af04764bebff99ef413111a5af18c80 languageName: node linkType: hard "resolve-cwd@npm:^3.0.0": version: 3.0.0 resolution: "resolve-cwd@npm:3.0.0" dependencies: resolve-from: ^5.0.0 checksum: 546e0816012d65778e580ad62b29e975a642989108d9a3c5beabfb2304192fa3c9f9146fbdfe213563c6ff51975ae41bac1d3c6e047dd9572c94863a057b4d81 languageName: node linkType: hard "resolve-from@npm:^4.0.0": version: 4.0.0 resolution: "resolve-from@npm:4.0.0" checksum: f4ba0b8494846a5066328ad33ef8ac173801a51739eb4d63408c847da9a2e1c1de1e6cbbf72699211f3d13f8fc1325648b169bd15eb7da35688e30a5fb0e4a7f languageName: node linkType: hard "resolve-from@npm:^5.0.0": version: 5.0.0 resolution: "resolve-from@npm:5.0.0" checksum: 4ceeb9113e1b1372d0cd969f3468fa042daa1dd9527b1b6bb88acb6ab55d8b9cd65dbf18819f9f9ddf0db804990901dcdaade80a215e7b2c23daae38e64f5bdf languageName: node linkType: hard "resolve.exports@npm:^2.0.0": version: 2.0.2 resolution: "resolve.exports@npm:2.0.2" checksum: 1c7778ca1b86a94f8ab4055d196c7d87d1874b96df4d7c3e67bbf793140f0717fd506dcafd62785b079cd6086b9264424ad634fb904409764c3509c3df1653f2 languageName: node linkType: hard "resolve@npm:^1.10.0, resolve@npm:^1.20.0, resolve@npm:^1.22.4": version: 1.22.4 resolution: "resolve@npm:1.22.4" dependencies: is-core-module: ^2.13.0 path-parse: ^1.0.7 supports-preserve-symlinks-flag: ^1.0.0 bin: resolve: bin/resolve checksum: 23f25174c2736ce24c6d918910e0d1f89b6b38fefa07a995dff864acd7863d59a7f049e691f93b4b2ee29696303390d921552b6d1b841ed4a8101f517e1d0124 languageName: node linkType: hard "resolve@patch:resolve@^1.10.0#~builtin<compat/resolve>, resolve@patch:resolve@^1.20.0#~builtin<compat/resolve>, resolve@patch:resolve@^1.22.4#~builtin<compat/resolve>": version: 1.22.4 resolution: "resolve@patch:resolve@npm%3A1.22.4#~builtin<compat/resolve>::version=1.22.4&hash=c3c19d" dependencies: is-core-module: ^2.13.0 path-parse: ^1.0.7 supports-preserve-symlinks-flag: ^1.0.0 bin: resolve: bin/resolve checksum: c45f2545fdc4d21883861b032789e20aa67a2f2692f68da320cc84d5724cd02f2923766c5354b3210897e88f1a7b3d6d2c7c22faeead8eed7078e4c783a444bc languageName: node linkType: hard "restore-cursor@npm:^3.1.0": version: 3.1.0 resolution: "restore-cursor@npm:3.1.0" dependencies: onetime: ^5.1.0 signal-exit: ^3.0.2 checksum: f877dd8741796b909f2a82454ec111afb84eb45890eb49ac947d87991379406b3b83ff9673a46012fca0d7844bb989f45cc5b788254cf1a39b6b5a9659de0630 languageName: node linkType: hard "restore-cursor@npm:^4.0.0": version: 4.0.0 resolution: "restore-cursor@npm:4.0.0" dependencies: onetime: ^5.1.0 signal-exit: ^3.0.2 checksum: 5b675c5a59763bf26e604289eab35711525f11388d77f409453904e1e69c0d37ae5889295706b2c81d23bd780165084d040f9b68fffc32cc921519031c4fa4af languageName: node linkType: hard "retry@npm:^0.12.0": version: 0.12.0 resolution: "retry@npm:0.12.0" checksum: 623bd7d2e5119467ba66202d733ec3c2e2e26568074923bc0585b6b99db14f357e79bdedb63cab56cec47491c4a0da7e6021a7465ca6dc4f481d3898fdd3158c languageName: node linkType: hard "reusify@npm:^1.0.4": version: 1.0.4 resolution: "reusify@npm:1.0.4" checksum: c3076ebcc22a6bc252cb0b9c77561795256c22b757f40c0d8110b1300723f15ec0fc8685e8d4ea6d7666f36c79ccc793b1939c748bf36f18f542744a4e379fcc languageName: node linkType: hard "rfdc@npm:^1.3.0": version: 1.3.0 resolution: "rfdc@npm:1.3.0" checksum: fb2ba8512e43519983b4c61bd3fa77c0f410eff6bae68b08614437bc3f35f91362215f7b4a73cbda6f67330b5746ce07db5dd9850ad3edc91271ad6deea0df32 languageName: node linkType: hard 
"rimraf@npm:^3.0.2": version: 3.0.2 resolution: "rimraf@npm:3.0.2" dependencies: glob: ^7.1.3 bin: rimraf: bin.js checksum: 87f4164e396f0171b0a3386cc1877a817f572148ee13a7e113b238e48e8a9f2f31d009a92ec38a591ff1567d9662c6b67fd8818a2dbbaed74bc26a87a2a4a9a0 languageName: node linkType: hard "run-parallel@npm:^1.1.9": version: 1.2.0 resolution: "run-parallel@npm:1.2.0" dependencies: queue-microtask: ^1.2.2 checksum: cb4f97ad25a75ebc11a8ef4e33bb962f8af8516bb2001082ceabd8902e15b98f4b84b4f8a9b222e5d57fc3bd1379c483886ed4619367a7680dad65316993021d languageName: node linkType: hard "safe-array-concat@npm:^1.0.0": version: 1.0.0 resolution: "safe-array-concat@npm:1.0.0" dependencies: call-bind: ^1.0.2 get-intrinsic: ^1.2.0 has-symbols: ^1.0.3 isarray: ^2.0.5 checksum: f43cb98fe3b566327d0c09284de2b15fb85ae964a89495c1b1a5d50c7c8ed484190f4e5e71aacc167e16231940079b326f2c0807aea633d47cc7322f40a6b57f languageName: node linkType: hard "safe-buffer@npm:~5.2.0": version: 5.2.1 resolution: "safe-buffer@npm:5.2.1" checksum: b99c4b41fdd67a6aaf280fcd05e9ffb0813654894223afb78a31f14a19ad220bba8aba1cb14eddce1fcfb037155fe6de4e861784eb434f7d11ed58d1e70dd491 languageName: node linkType: hard "safe-regex-test@npm:^1.0.0": version: 1.0.0 resolution: "safe-regex-test@npm:1.0.0" dependencies: call-bind: ^1.0.2 get-intrinsic: ^1.1.3 is-regex: ^1.1.4 checksum: bc566d8beb8b43c01b94e67de3f070fd2781685e835959bbbaaec91cc53381145ca91f69bd837ce6ec244817afa0a5e974fc4e40a2957f0aca68ac3add1ddd34 languageName: node linkType: hard "safer-buffer@npm:>= 2.1.2 < 3.0.0": version: 2.1.2 resolution: "safer-buffer@npm:2.1.2" checksum: cab8f25ae6f1434abee8d80023d7e72b598cf1327164ddab31003c51215526801e40b66c5e65d658a0af1e9d6478cadcb4c745f4bd6751f97d8644786c0978b0 languageName: node linkType: hard "semver@npm:2 || 3 || 4 || 5, semver@npm:^5.5.0": version: 5.7.1 resolution: "semver@npm:5.7.1" bin: semver: ./bin/semver checksum: 57fd0acfd0bac382ee87cd52cd0aaa5af086a7dc8d60379dfe65fea491fb2489b6016400813930ecd61fd0952dae75c115287a1b16c234b1550887117744dfaf languageName: node linkType: hard "semver@npm:^6.3.0, semver@npm:^6.3.1": version: 6.3.1 resolution: "semver@npm:6.3.1" bin: semver: bin/semver.js checksum: ae47d06de28836adb9d3e25f22a92943477371292d9b665fb023fae278d345d508ca1958232af086d85e0155aee22e313e100971898bbb8d5d89b8b1d4054ca2 languageName: node linkType: hard "semver@npm:^7.3.2, semver@npm:^7.3.5, semver@npm:^7.3.7, semver@npm:^7.5.3, semver@npm:^7.5.4": version: 7.5.4 resolution: "semver@npm:7.5.4" dependencies: lru-cache: ^6.0.0 bin: semver: bin/semver.js checksum: 12d8ad952fa353b0995bf180cdac205a4068b759a140e5d3c608317098b3575ac2f1e09182206bf2eb26120e1c0ed8fb92c48c592f6099680de56bb071423ca3 languageName: node linkType: hard "serialize-error@npm:^7.0.1": version: 7.0.1 resolution: "serialize-error@npm:7.0.1" dependencies: type-fest: ^0.13.1 checksum: e0aba4dca2fc9fe74ae1baf38dbd99190e1945445a241ba646290f2176cdb2032281a76443b02ccf0caf30da5657d510746506368889a593b9835a497fc0732e languageName: node linkType: hard "set-blocking@npm:^2.0.0": version: 2.0.0 resolution: "set-blocking@npm:2.0.0" checksum: 6e65a05f7cf7ebdf8b7c75b101e18c0b7e3dff4940d480efed8aad3a36a4005140b660fa1d804cb8bce911cac290441dc728084a30504d3516ac2ff7ad607b02 languageName: node linkType: hard "shebang-command@npm:^1.2.0": version: 1.2.0 resolution: "shebang-command@npm:1.2.0" dependencies: shebang-regex: ^1.0.0 checksum: 9eed1750301e622961ba5d588af2212505e96770ec376a37ab678f965795e995ade7ed44910f5d3d3cb5e10165a1847f52d3348c64e146b8be922f7707958908 languageName: node 
linkType: hard "shebang-command@npm:^2.0.0": version: 2.0.0 resolution: "shebang-command@npm:2.0.0" dependencies: shebang-regex: ^3.0.0 checksum: 6b52fe87271c12968f6a054e60f6bde5f0f3d2db483a1e5c3e12d657c488a15474121a1d55cd958f6df026a54374ec38a4a963988c213b7570e1d51575cea7fa languageName: node linkType: hard "shebang-regex@npm:^1.0.0": version: 1.0.0 resolution: "shebang-regex@npm:1.0.0" checksum: 404c5a752cd40f94591dfd9346da40a735a05139dac890ffc229afba610854d8799aaa52f87f7e0c94c5007f2c6af55bdcaeb584b56691926c5eaf41dc8f1372 languageName: node linkType: hard "shebang-regex@npm:^3.0.0": version: 3.0.0 resolution: "shebang-regex@npm:3.0.0" checksum: 1a2bcae50de99034fcd92ad4212d8e01eedf52c7ec7830eedcf886622804fe36884278f2be8be0ea5fde3fd1c23911643a4e0f726c8685b61871c8908af01222 languageName: node linkType: hard "shell-quote@npm:^1.6.1": version: 1.8.1 resolution: "shell-quote@npm:1.8.1" checksum: 5f01201f4ef504d4c6a9d0d283fa17075f6770bfbe4c5850b074974c68062f37929ca61700d95ad2ac8822e14e8c4b990ca0e6e9272e64befd74ce5e19f0736b languageName: node linkType: hard "side-channel@npm:^1.0.4": version: 1.0.4 resolution: "side-channel@npm:1.0.4" dependencies: call-bind: ^1.0.0 get-intrinsic: ^1.0.2 object-inspect: ^1.9.0 checksum: 351e41b947079c10bd0858364f32bb3a7379514c399edb64ab3dce683933483fc63fb5e4efe0a15a2e8a7e3c436b6a91736ddb8d8c6591b0460a24bb4a1ee245 languageName: node linkType: hard "signal-exit@npm:^3.0.2, signal-exit@npm:^3.0.3, signal-exit@npm:^3.0.7": version: 3.0.7 resolution: "signal-exit@npm:3.0.7" checksum: a2f098f247adc367dffc27845853e9959b9e88b01cb301658cfe4194352d8d2bb32e18467c786a7fe15f1d44b233ea35633d076d5e737870b7139949d1ab6318 languageName: node linkType: hard "signal-exit@npm:^4.0.1": version: 4.1.0 resolution: "signal-exit@npm:4.1.0" checksum: 64c757b498cb8629ffa5f75485340594d2f8189e9b08700e69199069c8e3070fb3e255f7ab873c05dc0b3cec412aea7402e10a5990cb6a050bd33ba062a6c549 languageName: node linkType: hard "sisteransi@npm:^1.0.5": version: 1.0.5 resolution: "sisteransi@npm:1.0.5" checksum: aba6438f46d2bfcef94cf112c835ab395172c75f67453fe05c340c770d3c402363018ae1ab4172a1026a90c47eaccf3af7b6ff6fa749a680c2929bd7fa2b37a4 languageName: node linkType: hard "slash@npm:^3.0.0": version: 3.0.0 resolution: "slash@npm:3.0.0" checksum: 94a93fff615f25a999ad4b83c9d5e257a7280c90a32a7cb8b4a87996e4babf322e469c42b7f649fd5796edd8687652f3fb452a86dc97a816f01113183393f11c languageName: node linkType: hard "slash@npm:^4.0.0": version: 4.0.0 resolution: "slash@npm:4.0.0" checksum: da8e4af73712253acd21b7853b7e0dbba776b786e82b010a5bfc8b5051a1db38ed8aba8e1e8f400dd2c9f373be91eb1c42b66e91abb407ff42b10feece5e1d2d languageName: node linkType: hard "slice-ansi@npm:^4.0.0": version: 4.0.0 resolution: "slice-ansi@npm:4.0.0" dependencies: ansi-styles: ^4.0.0 astral-regex: ^2.0.0 is-fullwidth-code-point: ^3.0.0 checksum: 4a82d7f085b0e1b070e004941ada3c40d3818563ac44766cca4ceadd2080427d337554f9f99a13aaeb3b4a94d9964d9466c807b3d7b7541d1ec37ee32d308756 languageName: node linkType: hard "slice-ansi@npm:^5.0.0": version: 5.0.0 resolution: "slice-ansi@npm:5.0.0" dependencies: ansi-styles: ^6.0.0 is-fullwidth-code-point: ^4.0.0 checksum: 7e600a2a55e333a21ef5214b987c8358fe28bfb03c2867ff2cbf919d62143d1812ac27b4297a077fdaf27a03da3678e49551c93e35f9498a3d90221908a1180e languageName: node linkType: hard "smart-buffer@npm:^4.2.0": version: 4.2.0 resolution: "smart-buffer@npm:4.2.0" checksum: b5167a7142c1da704c0e3af85c402002b597081dd9575031a90b4f229ca5678e9a36e8a374f1814c8156a725d17008ae3bde63b92f9cfd132526379e580bec8b languageName: node 
  linkType: hard

"socks-proxy-agent@npm:^7.0.0":
  version: 7.0.0
  resolution: "socks-proxy-agent@npm:7.0.0"
  dependencies:
    agent-base: ^6.0.2
    debug: ^4.3.3
    socks: ^2.6.2
  checksum: 720554370154cbc979e2e9ce6a6ec6ced205d02757d8f5d93fe95adae454fc187a5cbfc6b022afab850a5ce9b4c7d73e0f98e381879cf45f66317a4895953846
  languageName: node
  linkType: hard

"socks@npm:^2.6.2":
  version: 2.7.1
  resolution: "socks@npm:2.7.1"
  dependencies:
    ip: ^2.0.0
    smart-buffer: ^4.2.0
  checksum: 259d9e3e8e1c9809a7f5c32238c3d4d2a36b39b83851d0f573bfde5f21c4b1288417ce1af06af1452569cd1eb0841169afd4998f0e04ba04656f6b7f0e46d748
  languageName: node
  linkType: hard

"source-map-support@npm:0.5.13":
  version: 0.5.13
  resolution: "source-map-support@npm:0.5.13"
  dependencies:
    buffer-from: ^1.0.0
    source-map: ^0.6.0
  checksum: 933550047b6c1a2328599a21d8b7666507427c0f5ef5eaadd56b5da0fd9505e239053c66fe181bf1df469a3b7af9d775778eee283cbb7ae16b902ddc09e93a97
  languageName: node
  linkType: hard

"source-map-support@npm:^0.5.21":
  version: 0.5.21
  resolution: "source-map-support@npm:0.5.21"
  dependencies:
    buffer-from: ^1.0.0
    source-map: ^0.6.0
  checksum: 43e98d700d79af1d36f859bdb7318e601dfc918c7ba2e98456118ebc4c4872b327773e5a1df09b0524e9e5063bb18f0934538eace60cca2710d1fa687645d137
  languageName: node
  linkType: hard

"source-map@npm:^0.6.0, source-map@npm:^0.6.1":
  version: 0.6.1
  resolution: "source-map@npm:0.6.1"
  checksum: 59ce8640cf3f3124f64ac289012c2b8bd377c238e316fb323ea22fbfe83da07d81e000071d7242cad7a23cd91c7de98e4df8830ec3f133cb6133a5f6e9f67bc2
  languageName: node
  linkType: hard

"spdx-correct@npm:^3.0.0":
  version: 3.1.1
  resolution: "spdx-correct@npm:3.1.1"
  dependencies:
    spdx-expression-parse: ^3.0.0
    spdx-license-ids: ^3.0.0
  checksum: 77ce438344a34f9930feffa61be0eddcda5b55fc592906ef75621d4b52c07400a97084d8701557b13f7d2aae0cb64f808431f469e566ef3fe0a3a131dcb775a6
  languageName: node
  linkType: hard

"spdx-exceptions@npm:^2.1.0":
  version: 2.3.0
  resolution: "spdx-exceptions@npm:2.3.0"
  checksum: cb69a26fa3b46305637123cd37c85f75610e8c477b6476fa7354eb67c08128d159f1d36715f19be6f9daf4b680337deb8c65acdcae7f2608ba51931540687ac0
  languageName: node
  linkType: hard

"spdx-expression-parse@npm:^3.0.0":
  version: 3.0.1
  resolution: "spdx-expression-parse@npm:3.0.1"
  dependencies:
    spdx-exceptions: ^2.1.0
    spdx-license-ids: ^3.0.0
  checksum: a1c6e104a2cbada7a593eaa9f430bd5e148ef5290d4c0409899855ce8b1c39652bcc88a725259491a82601159d6dc790bedefc9016c7472f7de8de7361f8ccde
  languageName: node
  linkType: hard

"spdx-license-ids@npm:^3.0.0":
  version: 3.0.5
  resolution: "spdx-license-ids@npm:3.0.5"
  checksum: b1ceea3f87407ec375d1de90f6fc7610d6c845ff5f8db21d4d752b3d4e121df563c78113df7c564daff4e8778ad54b9a9024a7e9ea3779f13a43dd0e9128c08e
  languageName: node
  linkType: hard

"sprintf-js@npm:~1.0.2":
  version: 1.0.3
  resolution: "sprintf-js@npm:1.0.3"
  checksum: 19d79aec211f09b99ec3099b5b2ae2f6e9cdefe50bc91ac4c69144b6d3928a640bb6ae5b3def70c2e85a2c3d9f5ec2719921e3a59d3ca3ef4b2fd1a4656a0df3
  languageName: node
  linkType: hard

"ssri@npm:^10.0.0":
  version: 10.0.5
  resolution: "ssri@npm:10.0.5"
  dependencies:
    minipass: ^7.0.3
  checksum: 0a31b65f21872dea1ed3f7c200d7bc1c1b91c15e419deca14f282508ba917cbb342c08a6814c7f68ca4ca4116dd1a85da2bbf39227480e50125a1ceffeecb750
  languageName: node
  linkType: hard

"stack-utils@npm:^2.0.3, stack-utils@npm:^2.0.6":
  version: 2.0.6
  resolution: "stack-utils@npm:2.0.6"
  dependencies:
    escape-string-regexp: ^2.0.0
  checksum: 052bf4d25bbf5f78e06c1d5e67de2e088b06871fa04107ca8d3f0e9d9263326e2942c8bedee3545795fc77d787d443a538345eef74db2f8e35db3558c6f91ff7
  languageName: node
  linkType: hard

"string-argv@npm:0.3.2":
  version: 0.3.2
  resolution: "string-argv@npm:0.3.2"
  checksum: 8703ad3f3db0b2641ed2adbb15cf24d3945070d9a751f9e74a924966db9f325ac755169007233e8985a39a6a292f14d4fee20482989b89b96e473c4221508a0f
  languageName: node
  linkType: hard

"string-length@npm:^4.0.1":
  version: 4.0.2
  resolution: "string-length@npm:4.0.2"
  dependencies:
    char-regex: ^1.0.2
    strip-ansi: ^6.0.0
  checksum: ce85533ef5113fcb7e522bcf9e62cb33871aa99b3729cec5595f4447f660b0cefd542ca6df4150c97a677d58b0cb727a3fe09ac1de94071d05526c73579bf505
  languageName: node
  linkType: hard

"string-width-cjs@npm:string-width@^4.2.0, string-width@npm:^1.0.2 || 2 || 3 || 4, string-width@npm:^4.1.0, string-width@npm:^4.2.0, string-width@npm:^4.2.3":
  version: 4.2.3
  resolution: "string-width@npm:4.2.3"
  dependencies:
    emoji-regex: ^8.0.0
    is-fullwidth-code-point: ^3.0.0
    strip-ansi: ^6.0.1
  checksum: e52c10dc3fbfcd6c3a15f159f54a90024241d0f149cf8aed2982a2d801d2e64df0bf1dc351cf8e95c3319323f9f220c16e740b06faecd53e2462df1d2b5443fb
  languageName: node
  linkType: hard

"string-width@npm:^5.0.0, string-width@npm:^5.0.1, string-width@npm:^5.1.2":
  version: 5.1.2
  resolution: "string-width@npm:5.1.2"
  dependencies:
    eastasianwidth: ^0.2.0
    emoji-regex: ^9.2.2
    strip-ansi: ^7.0.1
  checksum: 7369deaa29f21dda9a438686154b62c2c5f661f8dda60449088f9f980196f7908fc39fdd1803e3e01541970287cf5deae336798337e9319a7055af89dafa7193
  languageName: node
  linkType: hard

"string.prototype.padend@npm:^3.0.0":
  version: 3.1.4
  resolution: "string.prototype.padend@npm:3.1.4"
  dependencies:
    call-bind: ^1.0.2
    define-properties: ^1.1.4
    es-abstract: ^1.20.4
  checksum: 76e07238fe31dc12177428f0436b7ed6985f6a7ba97470fd53e4f0a6d9860bfee127d81957f3073cc879b434233df143825d140581e1340278053ad993c92f6c
  languageName: node
  linkType: hard

"string.prototype.trim@npm:^1.2.7":
  version: 1.2.7
  resolution: "string.prototype.trim@npm:1.2.7"
  dependencies:
    call-bind: ^1.0.2
    define-properties: ^1.1.4
    es-abstract: ^1.20.4
  checksum: 05b7b2d6af63648e70e44c4a8d10d8cc457536df78b55b9d6230918bde75c5987f6b8604438c4c8652eb55e4fc9725d2912789eb4ec457d6995f3495af190c09
  languageName: node
  linkType: hard

"string.prototype.trimend@npm:^1.0.6":
  version: 1.0.6
  resolution: "string.prototype.trimend@npm:1.0.6"
  dependencies:
    call-bind: ^1.0.2
    define-properties: ^1.1.4
    es-abstract: ^1.20.4
  checksum: 0fdc34645a639bd35179b5a08227a353b88dc089adf438f46be8a7c197fc3f22f8514c1c9be4629b3cd29c281582730a8cbbad6466c60f76b5f99cf2addb132e
  languageName: node
  linkType: hard

"string.prototype.trimstart@npm:^1.0.6":
  version: 1.0.6
  resolution: "string.prototype.trimstart@npm:1.0.6"
  dependencies:
    call-bind: ^1.0.2
    define-properties: ^1.1.4
    es-abstract: ^1.20.4
  checksum: 89080feef416621e6ef1279588994305477a7a91648d9436490d56010a1f7adc39167cddac7ce0b9884b8cdbef086987c4dcb2960209f2af8bac0d23ceff4f41
  languageName: node
  linkType: hard

"string_decoder@npm:^1.1.1":
  version: 1.3.0
  resolution: "string_decoder@npm:1.3.0"
  dependencies:
    safe-buffer: ~5.2.0
  checksum: 8417646695a66e73aefc4420eb3b84cc9ffd89572861fe004e6aeb13c7bc00e2f616247505d2dbbef24247c372f70268f594af7126f43548565c68c117bdeb56
  languageName: node
  linkType: hard

"strip-ansi-cjs@npm:strip-ansi@^6.0.1, strip-ansi@npm:^6.0.0, strip-ansi@npm:^6.0.1":
  version: 6.0.1
  resolution: "strip-ansi@npm:6.0.1"
  dependencies:
    ansi-regex: ^5.0.1
  checksum: f3cd25890aef3ba6e1a74e20896c21a46f482e93df4a06567cebf2b57edabb15133f1f94e57434e0a958d61186087b1008e89c94875d019910a213181a14fc8c
  languageName: node
  linkType: hard

"strip-ansi@npm:^7.0.1":
  version: 7.1.0
  resolution: "strip-ansi@npm:7.1.0"
  dependencies:
    ansi-regex: ^6.0.1
  checksum: 859c73fcf27869c22a4e4d8c6acfe690064659e84bef9458aa6d13719d09ca88dcfd40cbf31fd0be63518ea1a643fe070b4827d353e09533a5b0b9fd4553d64d
  languageName: node
  linkType: hard

"strip-bom@npm:^3.0.0":
  version: 3.0.0
  resolution: "strip-bom@npm:3.0.0"
  checksum: 8d50ff27b7ebe5ecc78f1fe1e00fcdff7af014e73cf724b46fb81ef889eeb1015fc5184b64e81a2efe002180f3ba431bdd77e300da5c6685d702780fbf0c8d5b
  languageName: node
  linkType: hard

"strip-bom@npm:^4.0.0":
  version: 4.0.0
  resolution: "strip-bom@npm:4.0.0"
  checksum: 9dbcfbaf503c57c06af15fe2c8176fb1bf3af5ff65003851a102749f875a6dbe0ab3b30115eccf6e805e9d756830d3e40ec508b62b3f1ddf3761a20ebe29d3f3
  languageName: node
  linkType: hard

"strip-final-newline@npm:^2.0.0":
  version: 2.0.0
  resolution: "strip-final-newline@npm:2.0.0"
  checksum: 69412b5e25731e1938184b5d489c32e340605bb611d6140344abc3421b7f3c6f9984b21dff296dfcf056681b82caa3bb4cc996a965ce37bcfad663e92eae9c64
  languageName: node
  linkType: hard

"strip-final-newline@npm:^3.0.0":
  version: 3.0.0
  resolution: "strip-final-newline@npm:3.0.0"
  checksum: 23ee263adfa2070cd0f23d1ac14e2ed2f000c9b44229aec9c799f1367ec001478469560abefd00c5c99ee6f0b31c137d53ec6029c53e9f32a93804e18c201050
  languageName: node
  linkType: hard

"strip-json-comments@npm:^3.1.1":
  version: 3.1.1
  resolution: "strip-json-comments@npm:3.1.1"
  checksum: 492f73e27268f9b1c122733f28ecb0e7e8d8a531a6662efbd08e22cccb3f9475e90a1b82cab06a392f6afae6d2de636f977e231296400d0ec5304ba70f166443
  languageName: node
  linkType: hard

"supertap@npm:^3.0.1":
  version: 3.0.1
  resolution: "supertap@npm:3.0.1"
  dependencies:
    indent-string: ^5.0.0
    js-yaml: ^3.14.1
    serialize-error: ^7.0.1
    strip-ansi: ^7.0.1
  checksum: ee3d71c1d25f7f15d4a849e72b0c5f430df7cd8f702cf082fdbec5642a9546be6557766745655fa3a3e9c88f7c7eed849f2d74457b5b72cb9d94a779c0c8a948
  languageName: node
  linkType: hard

"supports-color@npm:^5.3.0":
  version: 5.5.0
  resolution: "supports-color@npm:5.5.0"
  dependencies:
    has-flag: ^3.0.0
  checksum: 95f6f4ba5afdf92f495b5a912d4abee8dcba766ae719b975c56c084f5004845f6f5a5f7769f52d53f40e21952a6d87411bafe34af4a01e65f9926002e38e1dac
  languageName: node
  linkType: hard

"supports-color@npm:^7.1.0":
  version: 7.2.0
  resolution: "supports-color@npm:7.2.0"
  dependencies:
    has-flag: ^4.0.0
  checksum: 3dda818de06ebbe5b9653e07842d9479f3555ebc77e9a0280caf5a14fb877ffee9ed57007c3b78f5a6324b8dbeec648d9e97a24e2ed9fdb81ddc69ea07100f4a
  languageName: node
  linkType: hard

"supports-color@npm:^8.0.0":
  version: 8.1.1
  resolution: "supports-color@npm:8.1.1"
  dependencies:
    has-flag: ^4.0.0
  checksum: c052193a7e43c6cdc741eb7f378df605636e01ad434badf7324f17fb60c69a880d8d8fcdcb562cf94c2350e57b937d7425ab5b8326c67c2adc48f7c87c1db406
  languageName: node
  linkType: hard

"supports-preserve-symlinks-flag@npm:^1.0.0":
  version: 1.0.0
  resolution: "supports-preserve-symlinks-flag@npm:1.0.0"
  checksum: 53b1e247e68e05db7b3808b99b892bd36fb096e6fba213a06da7fab22045e97597db425c724f2bbd6c99a3c295e1e73f3e4de78592289f38431049e1277ca0ae
  languageName: node
  linkType: hard

"tar@npm:^6.1.11, tar@npm:^6.1.2":
  version: 6.1.15
  resolution: "tar@npm:6.1.15"
  dependencies:
    chownr: ^2.0.0
    fs-minipass: ^2.0.0
    minipass: ^5.0.0
    minizlib: ^2.1.1
    mkdirp: ^1.0.3
    yallist: ^4.0.0
  checksum: f23832fceeba7578bf31907aac744ae21e74a66f4a17a9e94507acf460e48f6db598c7023882db33bab75b80e027c21f276d405e4a0322d58f51c7088d428268
  languageName: node
  linkType: hard

"temp-dir@npm:^3.0.0":
  version: 3.0.0
  resolution: "temp-dir@npm:3.0.0"
  checksum: 577211e995d1d584dd60f1469351d45e8a5b4524e4a9e42d3bdd12cfde1d0bb8f5898311bef24e02aaafb69514c1feb58c7b4c33dcec7129da3b0861a4ca935b
  languageName: node
  linkType: hard

"test-exclude@npm:^6.0.0":
  version: 6.0.0
  resolution: "test-exclude@npm:6.0.0"
  dependencies:
    "@istanbuljs/schema": ^0.1.2
    glob: ^7.1.4
    minimatch: ^3.0.4
  checksum: 3b34a3d77165a2cb82b34014b3aba93b1c4637a5011807557dc2f3da826c59975a5ccad765721c4648b39817e3472789f9b0fa98fc854c5c1c7a1e632aacdc28
  languageName: node
  linkType: hard

"text-table@npm:^0.2.0":
  version: 0.2.0
  resolution: "text-table@npm:0.2.0"
  checksum: b6937a38c80c7f84d9c11dd75e49d5c44f71d95e810a3250bd1f1797fc7117c57698204adf676b71497acc205d769d65c16ae8fa10afad832ae1322630aef10a
  languageName: node
  linkType: hard

"time-zone@npm:^1.0.0":
  version: 1.0.0
  resolution: "time-zone@npm:1.0.0"
  checksum: e46f5a69b8c236dcd8e91e29d40d4e7a3495ed4f59888c3f84ce1d9678e20461421a6ba41233509d47dd94bc18f1a4377764838b21b584663f942b3426dcbce8
  languageName: node
  linkType: hard

"tmpl@npm:1.0.5":
  version: 1.0.5
  resolution: "tmpl@npm:1.0.5"
  checksum: cd922d9b853c00fe414c5a774817be65b058d54a2d01ebb415840960406c669a0fc632f66df885e24cb022ec812739199ccbdb8d1164c3e513f85bfca5ab2873
  languageName: node
  linkType: hard

"to-fast-properties@npm:^2.0.0":
  version: 2.0.0
  resolution: "to-fast-properties@npm:2.0.0"
  checksum: be2de62fe58ead94e3e592680052683b1ec986c72d589e7b21e5697f8744cdbf48c266fa72f6c15932894c10187b5f54573a3bcf7da0bfd964d5caf23d436168
  languageName: node
  linkType: hard

"to-regex-range@npm:^5.0.1":
  version: 5.0.1
  resolution: "to-regex-range@npm:5.0.1"
  dependencies:
    is-number: ^7.0.0
  checksum: f76fa01b3d5be85db6a2a143e24df9f60dd047d151062d0ba3df62953f2f697b16fe5dad9b0ac6191c7efc7b1d9dcaa4b768174b7b29da89d4428e64bc0a20ed
  languageName: node
  linkType: hard

"tokenizers@workspace:.":
  version: 0.0.0-use.local
  resolution: "tokenizers@workspace:."
  dependencies:
    "@napi-rs/cli": ^2.14.6
    "@swc-node/register": ^1.5.5
    "@swc/core": ^1.3.32
    "@taplo/cli": ^0.5.2
    "@types/jest": ^29.5.1
    "@typescript-eslint/eslint-plugin": ^5.50.0
    "@typescript-eslint/parser": ^5.50.0
    ava: ^5.1.1
    benny: ^3.7.1
    chalk: ^5.2.0
    eslint: ^8.33.0
    eslint-config-prettier: ^8.6.0
    eslint-plugin-import: ^2.27.5
    eslint-plugin-prettier: ^4.2.1
    husky: ^8.0.3
    jest: ^29.5.0
    lint-staged: ^13.1.0
    npm-run-all: ^4.1.5
    prettier: ^2.8.3
    ts-jest: ^29.1.0
    typescript: ^5.0.0
  languageName: unknown
  linkType: soft

"ts-jest@npm:^29.1.0":
  version: 29.1.1
  resolution: "ts-jest@npm:29.1.1"
  dependencies:
    bs-logger: 0.x
    fast-json-stable-stringify: 2.x
    jest-util: ^29.0.0
    json5: ^2.2.3
    lodash.memoize: 4.x
    make-error: 1.x
    semver: ^7.5.3
    yargs-parser: ^21.0.1
  peerDependencies:
    "@babel/core": ">=7.0.0-beta.0 <8"
    "@jest/types": ^29.0.0
    babel-jest: ^29.0.0
    jest: ^29.0.0
    typescript: ">=4.3 <6"
  peerDependenciesMeta:
    "@babel/core":
      optional: true
    "@jest/types":
      optional: true
    babel-jest:
      optional: true
    esbuild:
      optional: true
  bin:
    ts-jest: cli.js
  checksum: a8c9e284ed4f819526749f6e4dc6421ec666f20ab44d31b0f02b4ed979975f7580b18aea4813172d43e39b29464a71899f8893dd29b06b4a351a3af8ba47b402
  languageName: node
  linkType: hard

"tsconfig-paths@npm:^3.14.2":
  version: 3.14.2
  resolution: "tsconfig-paths@npm:3.14.2"
  dependencies:
    "@types/json5": ^0.0.29
    json5: ^1.0.2
    minimist: ^1.2.6
    strip-bom: ^3.0.0
  checksum: a6162eaa1aed680537f93621b82399c7856afd10ec299867b13a0675e981acac4e0ec00896860480efc59fc10fd0b16fdc928c0b885865b52be62cadac692447
  languageName: node
  linkType: hard

"tslib@npm:^1.8.1":
  version: 1.14.1
  resolution: "tslib@npm:1.14.1"
  checksum: dbe628ef87f66691d5d2959b3e41b9ca0045c3ee3c7c7b906cc1e328b39f199bb1ad9e671c39025bd56122ac57dfbf7385a94843b1cc07c60a4db74795829acd
  languageName: node
  linkType: hard

"tslib@npm:^2.5.0":
  version: 2.6.2
  resolution: "tslib@npm:2.6.2"
  checksum: 329ea56123005922f39642318e3d1f0f8265d1e7fcb92c633e0809521da75eeaca28d2cf96d7248229deb40e5c19adf408259f4b9640afd20d13aecc1430f3ad
  languageName: node
  linkType: hard

"tsutils@npm:^3.21.0":
  version: 3.21.0
  resolution: "tsutils@npm:3.21.0"
  dependencies:
    tslib: ^1.8.1
  peerDependencies:
    typescript: ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
  checksum: 1843f4c1b2e0f975e08c4c21caa4af4f7f65a12ac1b81b3b8489366826259323feb3fc7a243123453d2d1a02314205a7634e048d4a8009921da19f99755cdc48
  languageName: node
  linkType: hard

"type-check@npm:^0.4.0, type-check@npm:~0.4.0":
  version: 0.4.0
  resolution: "type-check@npm:0.4.0"
  dependencies:
    prelude-ls: ^1.2.1
  checksum: ec688ebfc9c45d0c30412e41ca9c0cdbd704580eb3a9ccf07b9b576094d7b86a012baebc95681999dd38f4f444afd28504cb3a89f2ef16b31d4ab61a0739025a
  languageName: node
  linkType: hard

"type-detect@npm:4.0.8":
  version: 4.0.8
  resolution: "type-detect@npm:4.0.8"
  checksum: 62b5628bff67c0eb0b66afa371bd73e230399a8d2ad30d852716efcc4656a7516904570cd8631a49a3ce57c10225adf5d0cbdcb47f6b0255fe6557c453925a15
  languageName: node
  linkType: hard

"type-fest@npm:^0.13.1":
  version: 0.13.1
  resolution: "type-fest@npm:0.13.1"
  checksum: e6bf2e3c449f27d4ef5d56faf8b86feafbc3aec3025fc9a5fbe2db0a2587c44714521f9c30d8516a833c8c506d6263f5cc11267522b10c6ccdb6cc55b0a9d1c4
  languageName: node
  linkType: hard

"type-fest@npm:^0.20.2":
  version: 0.20.2
  resolution: "type-fest@npm:0.20.2"
  checksum: 4fb3272df21ad1c552486f8a2f8e115c09a521ad7a8db3d56d53718d0c907b62c6e9141ba5f584af3f6830d0872c521357e512381f24f7c44acae583ad517d73
  languageName: node
  linkType: hard

"type-fest@npm:^0.21.3":
  version: 0.21.3
  resolution: "type-fest@npm:0.21.3"
  checksum: e6b32a3b3877f04339bae01c193b273c62ba7bfc9e325b8703c4ee1b32dc8fe4ef5dfa54bf78265e069f7667d058e360ae0f37be5af9f153b22382cd55a9afe0
  languageName: node
  linkType: hard

"type-fest@npm:^1.0.2":
  version: 1.4.0
  resolution: "type-fest@npm:1.4.0"
  checksum: b011c3388665b097ae6a109a437a04d6f61d81b7357f74cbcb02246f2f5bd72b888ae33631b99871388122ba0a87f4ff1c94078e7119ff22c70e52c0ff828201
  languageName: node
  linkType: hard

"typed-array-buffer@npm:^1.0.0":
  version: 1.0.0
  resolution: "typed-array-buffer@npm:1.0.0"
  dependencies:
    call-bind: ^1.0.2
    get-intrinsic: ^1.2.1
    is-typed-array: ^1.1.10
  checksum: 3e0281c79b2a40cd97fe715db803884301993f4e8c18e8d79d75fd18f796e8cd203310fec8c7fdb5e6c09bedf0af4f6ab8b75eb3d3a85da69328f28a80456bd3
  languageName: node
  linkType: hard

"typed-array-byte-length@npm:^1.0.0":
  version: 1.0.0
  resolution: "typed-array-byte-length@npm:1.0.0"
  dependencies:
    call-bind: ^1.0.2
    for-each: ^0.3.3
    has-proto: ^1.0.1
    is-typed-array: ^1.1.10
  checksum: b03db16458322b263d87a702ff25388293f1356326c8a678d7515767ef563ef80e1e67ce648b821ec13178dd628eb2afdc19f97001ceae7a31acf674c849af94
  languageName: node
  linkType: hard

"typed-array-byte-offset@npm:^1.0.0":
  version: 1.0.0
  resolution: "typed-array-byte-offset@npm:1.0.0"
  dependencies:
    available-typed-arrays: ^1.0.5
    call-bind: ^1.0.2
    for-each: ^0.3.3
    has-proto: ^1.0.1
    is-typed-array: ^1.1.10
  checksum: 04f6f02d0e9a948a95fbfe0d5a70b002191fae0b8fe0fe3130a9b2336f043daf7a3dda56a31333c35a067a97e13f539949ab261ca0f3692c41603a46a94e960b
  languageName: node
  linkType: hard

"typed-array-length@npm:^1.0.4":
  version: 1.0.4
  resolution: "typed-array-length@npm:1.0.4"
  dependencies:
    call-bind: ^1.0.2
    for-each: ^0.3.3
    is-typed-array: ^1.1.9
  checksum: 2228febc93c7feff142b8c96a58d4a0d7623ecde6c7a24b2b98eb3170e99f7c7eff8c114f9b283085cd59dcd2bd43aadf20e25bba4b034a53c5bb292f71f8956
  languageName: node
  linkType: hard

"typescript@npm:^5.0.0":
  version: 5.2.2
  resolution: "typescript@npm:5.2.2"
  bin:
    tsc: bin/tsc
    tsserver: bin/tsserver
  checksum: 7912821dac4d962d315c36800fe387cdc0a6298dba7ec171b350b4a6e988b51d7b8f051317786db1094bd7431d526b648aba7da8236607febb26cf5b871d2d3c
  languageName: node
  linkType: hard

"typescript@patch:typescript@^5.0.0#~builtin<compat/typescript>":
  version: 5.2.2
  resolution: "typescript@patch:typescript@npm%3A5.2.2#~builtin<compat/typescript>::version=5.2.2&hash=77c9e2"
  bin:
    tsc: bin/tsc
    tsserver: bin/tsserver
  checksum: 07106822b4305de3f22835cbba949a2b35451cad50888759b6818421290ff95d522b38ef7919e70fb381c5fe9c1c643d7dea22c8b31652a717ddbd57b7f4d554
  languageName: node
  linkType: hard

"unbox-primitive@npm:^1.0.2":
  version: 1.0.2
  resolution: "unbox-primitive@npm:1.0.2"
  dependencies:
    call-bind: ^1.0.2
    has-bigints: ^1.0.2
    has-symbols: ^1.0.3
    which-boxed-primitive: ^1.0.2
  checksum: b7a1cf5862b5e4b5deb091672ffa579aa274f648410009c81cca63fed3b62b610c4f3b773f912ce545bb4e31edc3138975b5bc777fc6e4817dca51affb6380e9
  languageName: node
  linkType: hard

"unique-filename@npm:^3.0.0":
  version: 3.0.0
  resolution: "unique-filename@npm:3.0.0"
  dependencies:
    unique-slug: ^4.0.0
  checksum: 8e2f59b356cb2e54aab14ff98a51ac6c45781d15ceaab6d4f1c2228b780193dc70fae4463ce9e1df4479cb9d3304d7c2043a3fb905bdeca71cc7e8ce27e063df
  languageName: node
  linkType: hard

"unique-slug@npm:^4.0.0":
  version: 4.0.0
  resolution: "unique-slug@npm:4.0.0"
  dependencies:
    imurmurhash: ^0.1.4
  checksum: 0884b58365af59f89739e6f71e3feacb5b1b41f2df2d842d0757933620e6de08eff347d27e9d499b43c40476cbaf7988638d3acb2ffbcb9d35fd035591adfd15
  languageName: node
  linkType: hard

"universalify@npm:^2.0.0":
  version: 2.0.0
  resolution: "universalify@npm:2.0.0"
  checksum: 2406a4edf4a8830aa6813278bab1f953a8e40f2f63a37873ffa9a3bc8f9745d06cc8e88f3572cb899b7e509013f7f6fcc3e37e8a6d914167a5381d8440518c44
  languageName: node
  linkType: hard

"update-browserslist-db@npm:^1.0.11":
  version: 1.0.11
  resolution: "update-browserslist-db@npm:1.0.11"
  dependencies:
    escalade: ^3.1.1
    picocolors: ^1.0.0
  peerDependencies:
    browserslist: ">= 4.21.0"
  bin:
    update-browserslist-db: cli.js
  checksum: b98327518f9a345c7cad5437afae4d2ae7d865f9779554baf2a200fdf4bac4969076b679b1115434bd6557376bdd37ca7583d0f9b8f8e302d7d4cc1e91b5f231
  languageName: node
  linkType: hard

"uri-js@npm:^4.2.2":
  version: 4.4.1
  resolution: "uri-js@npm:4.4.1"
  dependencies:
    punycode: ^2.1.0
  checksum: 7167432de6817fe8e9e0c9684f1d2de2bb688c94388f7569f7dbdb1587c9f4ca2a77962f134ec90be0cc4d004c939ff0d05acc9f34a0db39a3c797dada262633
  languageName: node
  linkType: hard

"util-deprecate@npm:^1.0.1":
  version: 1.0.2
  resolution: "util-deprecate@npm:1.0.2"
  checksum: 474acf1146cb2701fe3b074892217553dfcf9a031280919ba1b8d651a068c9b15d863b7303cb15bd00a862b498e6cf4ad7b4a08fb134edd5a6f7641681cb54a2
  languageName: node
  linkType: hard

"v8-to-istanbul@npm:^9.0.1":
  version: 9.1.0
  resolution: "v8-to-istanbul@npm:9.1.0"
  dependencies:
    "@jridgewell/trace-mapping": ^0.3.12
    "@types/istanbul-lib-coverage": ^2.0.1
    convert-source-map: ^1.6.0
  checksum: 2069d59ee46cf8d83b4adfd8a5c1a90834caffa9f675e4360f1157ffc8578ef0f763c8f32d128334424159bb6b01f3876acd39cd13297b2769405a9da241f8d1
  languageName: node
  linkType: hard

"validate-npm-package-license@npm:^3.0.1":
  version: 3.0.4
  resolution: "validate-npm-package-license@npm:3.0.4"
  dependencies:
    spdx-correct: ^3.0.0
    spdx-expression-parse: ^3.0.0
  checksum: 35703ac889d419cf2aceef63daeadbe4e77227c39ab6287eeb6c1b36a746b364f50ba22e88591f5d017bc54685d8137bc2d328d0a896e4d3fd22093c0f32a9ad
  languageName: node
  linkType: hard

"walker@npm:^1.0.8":
  version: 1.0.8
  resolution: "walker@npm:1.0.8"
  dependencies:
    makeerror: 1.0.12
  checksum: ad7a257ea1e662e57ef2e018f97b3c02a7240ad5093c392186ce0bcf1f1a60bbadd520d073b9beb921ed99f64f065efb63dfc8eec689a80e569f93c1c5d5e16c
  languageName: node
  linkType: hard

"well-known-symbols@npm:^2.0.0":
  version: 2.0.0
  resolution: "well-known-symbols@npm:2.0.0"
  checksum: 4f54bbc3012371cb4d228f436891b8e7536d34ac61a57541890257e96788608e096231e0121ac24d08ef2f908b3eb2dc0adba35023eaeb2a7df655da91415402
  languageName: node
  linkType: hard

"which-boxed-primitive@npm:^1.0.2":
  version: 1.0.2
  resolution: "which-boxed-primitive@npm:1.0.2"
  dependencies:
    is-bigint: ^1.0.1
    is-boolean-object: ^1.1.0
    is-number-object: ^1.0.4
    is-string: ^1.0.5
    is-symbol: ^1.0.3
  checksum: 53ce774c7379071729533922adcca47220228405e1895f26673bbd71bdf7fb09bee38c1d6399395927c6289476b5ae0629863427fd151491b71c4b6cb04f3a5e
  languageName: node
  linkType: hard

"which-typed-array@npm:^1.1.10, which-typed-array@npm:^1.1.11":
  version: 1.1.11
  resolution: "which-typed-array@npm:1.1.11"
  dependencies:
    available-typed-arrays: ^1.0.5
    call-bind: ^1.0.2
    for-each: ^0.3.3
    gopd: ^1.0.1
    has-tostringtag: ^1.0.0
  checksum: 711ffc8ef891ca6597b19539075ec3e08bb9b4c2ca1f78887e3c07a977ab91ac1421940505a197758fb5939aa9524976d0a5bbcac34d07ed6faa75cedbb17206
  languageName: node
  linkType: hard

"which@npm:^1.2.9":
  version: 1.3.1
  resolution: "which@npm:1.3.1"
  dependencies:
    isexe: ^2.0.0
  bin:
    which: ./bin/which
  checksum: f2e185c6242244b8426c9df1510e86629192d93c1a986a7d2a591f2c24869e7ffd03d6dac07ca863b2e4c06f59a4cc9916c585b72ee9fa1aa609d0124df15e04
  languageName: node
  linkType: hard

"which@npm:^2.0.1, which@npm:^2.0.2":
  version: 2.0.2
  resolution: "which@npm:2.0.2"
  dependencies:
    isexe: ^2.0.0
  bin:
    node-which: ./bin/node-which
  checksum: 1a5c563d3c1b52d5f893c8b61afe11abc3bab4afac492e8da5bde69d550de701cf9806235f20a47b5c8fa8a1d6a9135841de2596535e998027a54589000e66d1
  languageName: node
  linkType: hard

"wide-align@npm:^1.1.5":
  version: 1.1.5
  resolution: "wide-align@npm:1.1.5"
  dependencies:
    string-width: ^1.0.2 || 2 || 3 || 4
  checksum: d5fc37cd561f9daee3c80e03b92ed3e84d80dde3365a8767263d03dacfc8fa06b065ffe1df00d8c2a09f731482fcacae745abfbb478d4af36d0a891fad4834d3
  languageName: node
  linkType: hard

"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0, wrap-ansi@npm:^7.0.0":
  version: 7.0.0
  resolution: "wrap-ansi@npm:7.0.0"
  dependencies:
    ansi-styles: ^4.0.0
    string-width: ^4.1.0
    strip-ansi: ^6.0.0
  checksum: a790b846fd4505de962ba728a21aaeda189b8ee1c7568ca5e817d85930e06ef8d1689d49dbf0e881e8ef84436af3a88bc49115c2e2788d841ff1b8b5b51a608b
  languageName: node
  linkType: hard

"wrap-ansi@npm:^6.2.0":
  version: 6.2.0
  resolution: "wrap-ansi@npm:6.2.0"
  dependencies:
    ansi-styles: ^4.0.0
    string-width: ^4.1.0
    strip-ansi: ^6.0.0
  checksum: 6cd96a410161ff617b63581a08376f0cb9162375adeb7956e10c8cd397821f7eb2a6de24eb22a0b28401300bf228c86e50617cd568209b5f6775b93c97d2fe3a
  languageName: node
  linkType: hard

"wrap-ansi@npm:^8.0.1, wrap-ansi@npm:^8.1.0":
  version: 8.1.0
  resolution: "wrap-ansi@npm:8.1.0"
  dependencies:
    ansi-styles: ^6.1.0
    string-width: ^5.0.1
    strip-ansi: ^7.0.1
  checksum: 371733296dc2d616900ce15a0049dca0ef67597d6394c57347ba334393599e800bab03c41d4d45221b6bc967b8c453ec3ae4749eff3894202d16800fdfe0e238
  languageName: node
  linkType: hard

"wrappy@npm:1":
  version: 1.0.2
  resolution: "wrappy@npm:1.0.2"
  checksum: 159da4805f7e84a3d003d8841557196034155008f817172d4e986bd591f74aa82aa7db55929a54222309e01079a65a92a9e6414da5a6aa4b01ee44a511ac3ee5
  languageName: node
  linkType: hard

"write-file-atomic@npm:^4.0.2":
  version: 4.0.2
  resolution: "write-file-atomic@npm:4.0.2"
  dependencies:
    imurmurhash: ^0.1.4
    signal-exit: ^3.0.7
  checksum: 5da60bd4eeeb935eec97ead3df6e28e5917a6bd317478e4a85a5285e8480b8ed96032bbcc6ecd07b236142a24f3ca871c924ec4a6575e623ec1b11bf8c1c253c
  languageName: node
  linkType: hard

"write-file-atomic@npm:^5.0.1":
  version: 5.0.1
  resolution: "write-file-atomic@npm:5.0.1"
  dependencies:
    imurmurhash: ^0.1.4
    signal-exit: ^4.0.1
  checksum: 8dbb0e2512c2f72ccc20ccedab9986c7d02d04039ed6e8780c987dc4940b793339c50172a1008eed7747001bfacc0ca47562668a069a7506c46c77d7ba3926a9
  languageName: node
  linkType: hard

"y18n@npm:^5.0.5":
  version: 5.0.8
  resolution: "y18n@npm:5.0.8"
  checksum: 54f0fb95621ee60898a38c572c515659e51cc9d9f787fb109cef6fde4befbe1c4602dc999d30110feee37456ad0f1660fa2edcfde6a9a740f86a290999550d30
  languageName: node
  linkType: hard

"yallist@npm:^3.0.2":
  version: 3.1.1
  resolution: "yallist@npm:3.1.1"
  checksum: 48f7bb00dc19fc635a13a39fe547f527b10c9290e7b3e836b9a8f1ca04d4d342e85714416b3c2ab74949c9c66f9cebb0473e6bc353b79035356103b47641285d
  languageName: node
  linkType: hard

"yallist@npm:^4.0.0":
  version: 4.0.0
  resolution: "yallist@npm:4.0.0"
  checksum: 343617202af32df2a15a3be36a5a8c0c8545208f3d3dfbc6bb7c3e3b7e8c6f8e7485432e4f3b88da3031a6e20afa7c711eded32ddfb122896ac5d914e75848d5
  languageName: node
  linkType: hard

"yaml@npm:2.3.1":
  version: 2.3.1
  resolution: "yaml@npm:2.3.1"
  checksum: 2c7bc9a7cd4c9f40d3b0b0a98e370781b68b8b7c4515720869aced2b00d92f5da1762b4ffa947f9e795d6cd6b19f410bd4d15fdd38aca7bd96df59bd9486fb54
  languageName: node
  linkType: hard

"yargs-parser@npm:^21.0.1, yargs-parser@npm:^21.1.1":
  version: 21.1.1
  resolution: "yargs-parser@npm:21.1.1"
  checksum: ed2d96a616a9e3e1cc7d204c62ecc61f7aaab633dcbfab2c6df50f7f87b393993fe6640d017759fe112d0cb1e0119f2b4150a87305cc873fd90831c6a58ccf1c
  languageName: node
  linkType: hard

"yargs@npm:^17.3.1, yargs@npm:^17.7.2":
  version: 17.7.2
  resolution: "yargs@npm:17.7.2"
  dependencies:
    cliui: ^8.0.1
    escalade: ^3.1.1
    get-caller-file: ^2.0.5
    require-directory: ^2.1.1
    string-width: ^4.2.3
    y18n: ^5.0.5
    yargs-parser: ^21.1.1
  checksum: 73b572e863aa4a8cbef323dd911d79d193b772defd5a51aab0aca2d446655216f5002c42c5306033968193bdbf892a7a4c110b0d77954a7fdf563e653967b56a
  languageName: node
  linkType: hard

"yocto-queue@npm:^0.1.0":
  version: 0.1.0
  resolution: "yocto-queue@npm:0.1.0"
  checksum: f77b3d8d00310def622123df93d4ee654fc6a0096182af8bd60679ddcdfb3474c56c6c7190817c84a2785648cdee9d721c0154eb45698c62176c322fb46fc700
  languageName: node
  linkType: hard

"yocto-queue@npm:^1.0.0":
  version: 1.0.0
  resolution: "yocto-queue@npm:1.0.0"
  checksum: 2cac84540f65c64ccc1683c267edce396b26b1e931aa429660aefac8fbe0188167b7aee815a3c22fa59a28a58d898d1a2b1825048f834d8d629f4c2a5d443801
  languageName: node
  linkType: hard
hf_public_repos/tokenizers/bindings/node/.editorconfig
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors or IDEs
# http://editorconfig.org

root = true

[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.md]
trim_trailing_whitespace = false
hf_public_repos/tokenizers/bindings/node/index.d.ts
/* tslint:disable */
/* eslint-disable */

/* auto-generated by NAPI-RS */

export function bpeDecoder(suffix?: string | undefined | null): Decoder
export function byteFallbackDecoder(): Decoder
/** ctc_decoder(padToken: string = '<pad>') */
export function ctcDecoder(
  padToken?: string,
  wordDelimiterToken?: string | undefined | null,
  cleanup?: boolean | undefined | null,
): Decoder
export function fuseDecoder(): Decoder
/** metaspace_decoder(replacement: string = '▁', addPrefixSpace: bool = true) */
export function metaspaceDecoder(replacement?: string, addPrefixSpace?: boolean): Decoder
export function replaceDecoder(pattern: string, content: string): Decoder
export function sequenceDecoder(decoders: Array<Decoder>): Decoder
export function stripDecoder(content: string, left: number, right: number): Decoder
/** word_piece_decoder(prefix: string = '##', cleanup: bool = true) */
export function wordPieceDecoder(prefix?: string, cleanup?: boolean): Decoder
export const enum TruncationDirection {
  Left = 'Left',
  Right = 'Right',
}
export const enum TruncationStrategy {
  LongestFirst = 'LongestFirst',
  OnlyFirst = 'OnlyFirst',
  OnlySecond = 'OnlySecond',
}
export interface BpeOptions {
  cacheCapacity?: number
  dropout?: number
  unkToken?: string
  continuingSubwordPrefix?: string
  endOfWordSuffix?: string
  fuseUnk?: boolean
  byteFallback?: boolean
}
export interface WordPieceOptions {
  unkToken?: string
  continuingSubwordPrefix?: string
  maxInputCharsPerWord?: number
}
export interface WordLevelOptions {
  unkToken?: string
}
export interface UnigramOptions {
  unkId?: number
  byteFallback?: boolean
}
export function prependNormalizer(prepend: string): Normalizer
export function stripAccentsNormalizer(): Normalizer
export interface BertNormalizerOptions {
  cleanText?: boolean
  handleChineseChars?: boolean
  stripAccents?: boolean
  lowercase?: boolean
}
/**
 * bert_normalizer(options?: {
 *   cleanText?: bool = true,
 *   handleChineseChars?: bool = true,
 *   stripAccents?: bool = true,
 *   lowercase?: bool = true
 * })
 */
export function bertNormalizer(options?: BertNormalizerOptions | undefined | null): Normalizer
export function nfdNormalizer(): Normalizer
export function nfkdNormalizer(): Normalizer
export function nfcNormalizer(): Normalizer
export function nfkcNormalizer(): Normalizer
export function stripNormalizer(left?: boolean | undefined | null, right?: boolean | undefined | null): Normalizer
export function sequenceNormalizer(normalizers: Array<Normalizer>): Normalizer
export function lowercase(): Normalizer
export function replace(pattern: string, content: string): Normalizer
export function nmt(): Normalizer
export function precompiled(bytes: Array<number>): Normalizer
export const enum JsSplitDelimiterBehavior {
  Removed = 'Removed',
  Isolated = 'Isolated',
  MergedWithPrevious = 'MergedWithPrevious',
  MergedWithNext = 'MergedWithNext',
  Contiguous = 'Contiguous',
}
/** byte_level(addPrefixSpace: bool = true, useRegex: bool = true) */
export function byteLevelPreTokenizer(
  addPrefixSpace?: boolean | undefined | null,
  useRegex?: boolean | undefined | null,
): PreTokenizer
export function byteLevelAlphabet(): Array<string>
export function whitespacePreTokenizer(): PreTokenizer
export function whitespaceSplitPreTokenizer(): PreTokenizer
export function bertPreTokenizer(): PreTokenizer
/** metaspace_pre_tokenizer(replacement: string = '▁', addPrefixSpace: bool = true) */
export function metaspacePreTokenizer(replacement?: string, addPrefixSpace?: boolean): PreTokenizer
export function splitPreTokenizer(pattern: string, behavior: string, invert?: boolean | undefined | null): PreTokenizer
export function punctuationPreTokenizer(behavior?: string | undefined | null): PreTokenizer
export function sequencePreTokenizer(preTokenizers: Array<PreTokenizer>): PreTokenizer
export function charDelimiterSplit(delimiter: string): PreTokenizer
export function digitsPreTokenizer(individualDigits?: boolean | undefined | null): PreTokenizer
export function bertProcessing(sep: [string, number], cls: [string, number]): Processor
export function robertaProcessing(
  sep: [string, number],
  cls: [string, number],
  trimOffsets?: boolean | undefined | null,
  addPrefixSpace?: boolean | undefined | null,
): Processor
export function byteLevelProcessing(trimOffsets?: boolean | undefined | null): Processor
export function templateProcessing(
  single: string,
  pair?: string | undefined | null,
  specialTokens?: Array<[string, number]> | undefined | null,
): Processor
export function sequenceProcessing(processors: Array<Processor>): Processor
export const enum PaddingDirection {
  Left = 0,
  Right = 1,
}
export interface PaddingOptions {
  maxLength?: number
  direction?: string | PaddingDirection
  padToMultipleOf?: number
  padId?: number
  padTypeId?: number
  padToken?: string
}
export interface EncodeOptions {
  isPretokenized?: boolean
  addSpecialTokens?: boolean
}
export interface TruncationOptions {
  maxLength?: number
  strategy?: TruncationStrategy
  direction?: string | TruncationDirection
  stride?: number
}
export interface AddedTokenOptions {
  singleWord?: boolean
  leftStrip?: boolean
  rightStrip?: boolean
  normalized?: boolean
}
export interface JsFromPretrainedParameters {
  revision?: string
  authToken?: string
}
export function slice(s: string, beginIndex?: number | undefined | null, endIndex?: number | undefined | null): string
export function mergeEncodings(encodings: Array<Encoding>, growingOffsets?: boolean | undefined | null): Encoding
/** Decoder */
export class Decoder {
  decode(tokens: Array<string>): string
}
export type JsEncoding = Encoding
export class Encoding {
  constructor()
  getLength(): number
  getNSequences(): number
  getIds(): Array<number>
  getTypeIds(): Array<number>
  getAttentionMask(): Array<number>
  getSpecialTokensMask(): Array<number>
  getTokens(): Array<string>
  getOffsets(): Array<Array<number>>
  getWordIds(): Array<number | undefined | null>
  charToToken(pos: number, seqId?: number | undefined | null): number | null
  charToWord(pos: number, seqId?: number | undefined | null): number | null
  pad(length: number, options?: PaddingOptions | undefined | null): void
  truncate(
    length: number,
    stride?: number | undefined | null,
    direction?: string | TruncationDirection | undefined | null,
  ): void
  wordToTokens(word: number, seqId?: number | undefined | null): [number, number] | null | undefined
  wordToChars(word: number, seqId?: number | undefined | null): [number, number] | null | undefined
  tokenToChars(token: number): [number, [number, number]] | null | undefined
  tokenToWord(token: number): number | null
  getOverflowing(): Array<Encoding>
  getSequenceIds(): Array<number | undefined | null>
  tokenToSequence(token: number): number | null
}
export class Model {}
export type Bpe = BPE
export class BPE {
  static empty(): Model
  static init(vocab: Vocab, merges: Merges, options?: BpeOptions | undefined | null): Model
  static fromFile(vocab: string, merges: string, options?: BpeOptions | undefined | null): Promise<Model>
}
export class WordPiece {
  static init(vocab: Vocab, options?: WordPieceOptions | undefined | null): Model
  static empty(): WordPiece
  static fromFile(vocab: string, options?: WordPieceOptions | undefined | null): Promise<Model>
}
export class WordLevel {
  static init(vocab: Vocab, options?: WordLevelOptions | undefined | null): Model
  static empty(): WordLevel
  static fromFile(vocab: string, options?: WordLevelOptions | undefined | null): Promise<Model>
}
export class Unigram {
  static init(vocab: Array<[string, number]>, options?: UnigramOptions | undefined | null): Model
  static empty(): Model
}
/** Normalizer */
export class Normalizer {
  normalizeString(sequence: string): string
}
/** PreTokenizers */
export class PreTokenizer {
  preTokenizeString(sequence: string): [string, [number, number]][]
}
export class Processor {}
export class AddedToken {
  constructor(token: string, isSpecial: boolean, options?: AddedTokenOptions | undefined | null)
  getContent(): string
}
export class Tokenizer {
  constructor(model: Model)
  setPreTokenizer(preTokenizer: PreTokenizer): void
  setDecoder(decoder: Decoder): void
  setModel(model: Model): void
  setPostProcessor(postProcessor: Processor): void
  setNormalizer(normalizer: Normalizer): void
  save(path: string, pretty?: boolean | undefined | null): void
  addAddedTokens(tokens: Array<AddedToken>): number
  addTokens(tokens: Array<string>): number
  encode(
    sentence: InputSequence,
    pair?: InputSequence | null,
    encodeOptions?: EncodeOptions | undefined | null,
  ): Promise<JsEncoding>
  encodeBatch(sentences: EncodeInput[], encodeOptions?: EncodeOptions | undefined | null): Promise<JsEncoding[]>
  decode(ids: Array<number>, skipSpecialTokens: boolean): Promise<string>
  decodeBatch(ids: Array<Array<number>>, skipSpecialTokens: boolean): Promise<string[]>
  static fromString(s: string): Tokenizer
  static fromFile(file: string): Tokenizer
  addSpecialTokens(tokens: Array<string>): void
  setTruncation(maxLength: number, options?: TruncationOptions | undefined | null): void
  disableTruncation(): void
  setPadding(options?: PaddingOptions | undefined | null): void
  disablePadding(): void
  getDecoder(): Decoder | null
  getNormalizer(): Normalizer | null
  getPreTokenizer(): PreTokenizer | null
  getPostProcessor(): Processor | null
  getVocab(withAddedTokens?: boolean | undefined | null): Record<string, number>
  getVocabSize(withAddedTokens?: boolean | undefined | null): number
  idToToken(id: number): string | null
  tokenToId(token: string): number | null
  train(files: Array<string>): void
  runningTasks(): number
  postProcess(
    encoding: Encoding,
    pair?: Encoding | undefined | null,
    addSpecialTokens?: boolean | undefined | null,
  ): Encoding
}
export class Trainer {}
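For orientation, here is a minimal usage sketch of the API declared above. It assumes the package entry point is `tokenizers` and that a serialized `./tokenizer.json` sits next to the script; both are assumptions, not something this file specifies.

// Minimal sketch based only on the declarations above; the `tokenizers`
// import name and the ./tokenizer.json path are assumptions.
import { Tokenizer } from 'tokenizers'

async function main(): Promise<void> {
  // Load a serialized tokenizer, then run the async encode pipeline.
  const tokenizer = Tokenizer.fromFile('./tokenizer.json')
  const encoding = await tokenizer.encode("Hello, y'all!", null, { addSpecialTokens: true })
  console.log(encoding.getTokens()) // Array<string>
  console.log(encoding.getIds()) // Array<number>
  // Round-trip back to text, skipping special tokens.
  console.log(await tokenizer.decode(encoding.getIds(), true))
}

void main()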
hf_public_repos/tokenizers/bindings/node/.taplo.toml
exclude = ["node_modules/**/*.toml"]

# https://taplo.tamasfe.dev/configuration/formatter-options.html
[formatting]
align_entries = true
indent_tables = true
reorder_keys = true
hf_public_repos/tokenizers/bindings/node/rustfmt.toml
tab_spaces = 2
hf_public_repos/tokenizers/bindings/node/Makefile
.PHONY: style check-style test

DATA_DIR = data

dir_guard=@mkdir -p $(@D)

# Format source code automatically
style:
	npm run lint

# Check the source code is formatted correctly
check-style:
	npm run lint-check

TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json

# Launch the test suite
test: $(TESTS_RESOURCES)
	npm run test

$(DATA_DIR)/big.txt :
	$(dir_guard)
	wget https://norvig.com/big.txt -O $@

$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
	head -100 $(DATA_DIR)/big.txt > $@

$(DATA_DIR)/roberta.json :
	$(dir_guard)
	wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@

$(DATA_DIR)/tokenizer-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@

$(DATA_DIR)/bert-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@
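Since `make test` only fetches these fixtures and then runs `npm run test`, a jest-style check over one of them would look roughly like the sketch below. This is illustrative only, not the repository's actual suite; the `tokenizers` import name is assumed.

// Illustrative sketch (not the repo's actual tests): exercises the
// data/roberta.json fixture downloaded by `make test` through the
// Tokenizer API declared in index.d.ts.
import { Tokenizer } from 'tokenizers' // assumed entry point

test('roberta fixture loads and encodes', async () => {
  const tokenizer = Tokenizer.fromFile('data/roberta.json')
  const encoding = await tokenizer.encode('Hello world', null)
  expect(encoding.getIds().length).toBeGreaterThan(0)
})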
hf_public_repos/tokenizers/bindings/node/Cargo.toml
[package]
authors = ["Nicolas Patry <[email protected]>"]
edition = "2021"
name = "node"
version = "0.15.1-dev.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[lib]
crate-type = ["cdylib"]

[dependencies]
napi = "2"
napi-derive = "2"
serde = { version = "1.0.163", features = ["derive"] }
tokenizers = { path = "../../tokenizers/" }

[build-dependencies]
napi-build = "2"

[profile.release]
lto = true
hf_public_repos/tokenizers/bindings/node/LICENSE
MIT License

Copyright (c) 2020 N-API for Rust

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
hf_public_repos/tokenizers/bindings/node/npm/linux-arm-gnueabihf/README.md
# `tokenizers-linux-arm-gnueabihf`

This is the **armv7-unknown-linux-gnueabihf** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json
{
  "name": "tokenizers-linux-arm-gnueabihf",
  "version": "0.13.4-rc1",
  "os": ["linux"],
  "cpu": ["arm"],
  "main": "tokenizers.linux-arm-gnueabihf.node",
  "files": ["tokenizers.linux-arm-gnueabihf.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
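Each of these per-platform packages ships exactly one prebuilt `.node` binary (the `main`/`files` fields above), and the top-level `tokenizers` package picks one at require time from `process.platform` and `process.arch`. A hypothetical sketch of that selection, under the assumption that the real loader is the usual napi-rs generated `index.js`:

// Hypothetical sketch of platform-package selection; the real loader is
// generated by napi-rs. The package names are the ones published from
// this npm/ directory.
function nativePackageName(): string {
  const { platform, arch } = process
  if (platform === 'linux' && arch === 'arm') return 'tokenizers-linux-arm-gnueabihf'
  if (platform === 'darwin' && arch === 'arm64') return 'tokenizers-darwin-arm64'
  if (platform === 'win32' && arch === 'x64') return 'tokenizers-win32-x64-msvc'
  // ...one branch per published platform/arch (and, on linux, per libc)
  throw new Error(`Unsupported platform: ${platform}-${arch}`)
}

// eslint-disable-next-line @typescript-eslint/no-var-requires
const native = require(nativePackageName())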
hf_public_repos/tokenizers/bindings/node/npm/win32-arm64-msvc/README.md
# `tokenizers-win32-arm64-msvc`

This is the **aarch64-pc-windows-msvc** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/win32-arm64-msvc/package.json
{
  "name": "tokenizers-win32-arm64-msvc",
  "version": "0.13.4-rc1",
  "os": ["win32"],
  "cpu": ["arm64"],
  "main": "tokenizers.win32-arm64-msvc.node",
  "files": ["tokenizers.win32-arm64-msvc.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
hf_public_repos/tokenizers/bindings/node/npm/android-arm-eabi/README.md
# `tokenizers-android-arm-eabi`

This is the **armv7-linux-androideabi** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/android-arm-eabi/package.json
{
  "name": "tokenizers-android-arm-eabi",
  "version": "0.13.4-rc1",
  "os": ["android"],
  "cpu": ["arm"],
  "main": "tokenizers.android-arm-eabi.node",
  "files": ["tokenizers.android-arm-eabi.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
hf_public_repos/tokenizers/bindings/node/npm/linux-x64-gnu/README.md
# `tokenizers-linux-x64-gnu`

This is the **x86_64-unknown-linux-gnu** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/linux-x64-gnu/package.json
{
  "name": "tokenizers-linux-x64-gnu",
  "version": "0.13.4-rc1",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "tokenizers.linux-x64-gnu.node",
  "files": ["tokenizers.linux-x64-gnu.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers",
  "libc": ["glibc"]
}
hf_public_repos/tokenizers/bindings/node/npm/win32-x64-msvc/README.md
# `tokenizers-win32-x64-msvc`

This is the **x86_64-pc-windows-msvc** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/win32-x64-msvc/package.json
{
  "name": "tokenizers-win32-x64-msvc",
  "version": "0.13.4-rc1",
  "os": ["win32"],
  "cpu": ["x64"],
  "main": "tokenizers.win32-x64-msvc.node",
  "files": ["tokenizers.win32-x64-msvc.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
hf_public_repos/tokenizers/bindings/node/npm/freebsd-x64/README.md
# `tokenizers-freebsd-x64`

This is the **x86_64-unknown-freebsd** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/freebsd-x64/package.json
{
  "name": "tokenizers-freebsd-x64",
  "version": "0.13.4-rc1",
  "os": ["freebsd"],
  "cpu": ["x64"],
  "main": "tokenizers.freebsd-x64.node",
  "files": ["tokenizers.freebsd-x64.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
hf_public_repos/tokenizers/bindings/node/npm/darwin-arm64/README.md
# `tokenizers-darwin-arm64`

This is the **aarch64-apple-darwin** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/darwin-arm64/package.json
{
  "name": "tokenizers-darwin-arm64",
  "version": "0.13.4-rc1",
  "os": ["darwin"],
  "cpu": ["arm64"],
  "main": "tokenizers.darwin-arm64.node",
  "files": ["tokenizers.darwin-arm64.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
hf_public_repos/tokenizers/bindings/node/npm/darwin-x64/README.md
# `tokenizers-darwin-x64`

This is the **x86_64-apple-darwin** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/darwin-x64/package.json
{
  "name": "tokenizers-darwin-x64",
  "version": "0.13.4-rc1",
  "os": ["darwin"],
  "cpu": ["x64"],
  "main": "tokenizers.darwin-x64.node",
  "files": ["tokenizers.darwin-x64.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers"
}
hf_public_repos/tokenizers/bindings/node/npm/linux-x64-musl/README.md
# `tokenizers-linux-x64-musl`

This is the **x86_64-unknown-linux-musl** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/linux-x64-musl/package.json
{
  "name": "tokenizers-linux-x64-musl",
  "version": "0.13.4-rc1",
  "os": ["linux"],
  "cpu": ["x64"],
  "main": "tokenizers.linux-x64-musl.node",
  "files": ["tokenizers.linux-x64-musl.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers",
  "libc": ["musl"]
}
hf_public_repos/tokenizers/bindings/node/npm/linux-arm64-musl/README.md
# `tokenizers-linux-arm64-musl`

This is the **aarch64-unknown-linux-musl** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/linux-arm64-musl/package.json
{
  "name": "tokenizers-linux-arm64-musl",
  "version": "0.13.4-rc1",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "tokenizers.linux-arm64-musl.node",
  "files": ["tokenizers.linux-arm64-musl.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers",
  "libc": ["musl"]
}
hf_public_repos/tokenizers/bindings/node/npm/linux-arm64-gnu/README.md
# `tokenizers-linux-arm64-gnu`

This is the **aarch64-unknown-linux-gnu** binary for `tokenizers`
hf_public_repos/tokenizers/bindings/node/npm/linux-arm64-gnu/package.json
{
  "name": "tokenizers-linux-arm64-gnu",
  "version": "0.13.4-rc1",
  "os": ["linux"],
  "cpu": ["arm64"],
  "main": "tokenizers.linux-arm64-gnu.node",
  "files": ["tokenizers.linux-arm64-gnu.node"],
  "description": "Tokenizers platform specific bindings",
  "keywords": ["napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api"],
  "license": "MIT",
  "engines": { "node": ">= 10" },
  "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" },
  "repository": "tokenizers",
  "libc": ["glibc"]
}
hf_public_repos/tokenizers/bindings/node/npm/win32-ia32-msvc/README.md
# `tokenizers-win32-ia32-msvc`

This is the **i686-pc-windows-msvc** binary for `tokenizers`