Dataset schema (one row per source file):

- text: string, length 7 to 318k characters (full file contents)
- id: string, length 14 to 166 characters
- metadata: dict
- __index_level_0__: int64, 0 to 439
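A minimal sketch of how a row with this schema could be consumed via the `datasets` library. The dataset identifier below is a placeholder assumption, not the real dataset name; the field accesses mirror the columns and `metadata` keys shown in the rows that follow.

```python
# Sketch only: iterating rows that follow the schema above.
from datasets import load_dataset

# Hypothetical dataset id (assumption) -- substitute the actual repository.
ds = load_dataset("username/transformers-source-files", split="train")

row = ds[0]
print(row["id"])                       # e.g. "transformers/src/.../tokenization_big_bird.py/0"
print(row["metadata"]["file_path"])    # original path inside the repo
print(row["metadata"]["token_count"])  # pre-computed token count for the file
print(len(row["text"]))                # raw source file as a single string
```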
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for BigBird.""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } class BigBirdTokenizer(PreTrainedTokenizer): """ Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The begin of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. 
The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts)) else: text = "".join(sub_texts) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Big Bird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
transformers/src/transformers/models/big_bird/tokenization_big_bird.py/0
{ "file_path": "transformers/src/transformers/models/big_bird/tokenization_big_bird.py", "repo_id": "transformers", "token_count": 6494 }
312
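Since the row above contains the full `BigBirdTokenizer` implementation, here is a short hedged usage sketch. It assumes the `google/bigbird-roberta-base` checkpoint named in the file is reachable; the behavior exercised (`[CLS] X [SEP]` inputs, all-zero token type IDs for a single sequence) follows directly from the methods shown above.

```python
# Sketch: exercising the tokenizer defined in the file above.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

# SentencePiece sub-word tokenization (_tokenize -> sp_model.encode).
tokens = tokenizer.tokenize("BigBird handles long documents.")
token_ids = tokenizer.convert_tokens_to_ids(tokens)

# build_inputs_with_special_tokens: [CLS] X [SEP] for a single sequence.
ids = tokenizer.build_inputs_with_special_tokens(token_ids)
assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id

# create_token_type_ids_from_sequences: all zeros for a single sequence.
assert set(tokenizer.create_token_type_ids_from_sequences(token_ids)) == {0}
```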
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_blenderbot"] = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_blenderbot"] = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_blenderbot"] = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/blenderbot/__init__.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot/__init__.py", "repo_id": "transformers", "token_count": 1632 }
313
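The `__init__.py` above wires optional backends through `_LazyModule` and `OptionalDependencyNotAvailable` guards. The sketch below illustrates the user-facing effect, under the assumption that the base library is installed and PyTorch may or may not be present; it mirrors the `is_torch_available()` guard used in the file.

```python
# Sketch: lazy module exports only resolve heavy backends on attribute access.
from transformers.utils import is_torch_available

# Always available: configuration and slow tokenizer.
from transformers import BlenderbotConfig, BlenderbotTokenizer  # noqa: F401

config = BlenderbotConfig()  # default Blenderbot configuration

# Torch-backed classes are only importable when torch is installed,
# matching the is_torch_available() branch in the __init__ above.
if is_torch_available():
    from transformers import BlenderbotForConditionalGeneration

    model = BlenderbotForConditionalGeneration(config)
```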
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Blip model configuration""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", "Salesforce/blip-vqa-capfit-large": ( "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json" ), "Salesforce/blip-image-captioning-base": ( "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json" ), "Salesforce/blip-image-captioning-large": ( "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json" ), "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", "Salesforce/blip-itm-large-flikr": ( "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json" ), } class BlipTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a BLIP text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `BlipText` used by the [base architectures](https://huggingface.co/Salesforce/blip-vqa-base). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30524): Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlipModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. encoder_hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers from the vision model. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. bos_token_id (`int`, *optional*, defaults to 30522): The id of the `beginning-of-sequence` token. eos_token_id (`int`, *optional*, defaults to 2): The id of the `end-of-sequence` token. pad_token_id (`int`, *optional*, defaults to 0): The id of the `padding` token. sep_token_id (`int`, *optional*, defaults to 102): The id of the `separator` token. is_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as a decoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Example: ```python >>> from transformers import BlipTextConfig, BlipTextModel >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration >>> configuration = BlipTextConfig() >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration >>> model = BlipTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blip_text_model" def __init__( self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, ) self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_hidden_size = encoder_hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.hidden_dropout_prob = hidden_dropout_prob self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.attention_probs_dropout_prob = attention_probs_dropout_prob self.is_decoder = is_decoder self.use_cache = use_cache @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type") == "blip": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type 
{config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class BlipVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate a BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the Blip-base [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import BlipVisionConfig, BlipVisionModel >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration >>> configuration = BlipVisionConfig() >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration >>> model = BlipVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blip_vision_model" def __init__( self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type") == "blip": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class BlipConfig(PretrainedConfig): r""" [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate a BLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-base [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`BlipTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`BlipVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimentionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The inital value of the *logit_scale* paramter. Default is used as per the original BLIP implementation. image_text_hidden_size (`int`, *optional*, defaults to 256): Dimentionality of the hidden state of the image-text fusion layer. kwargs (*optional*): Dictionary of keyword arguments. 
Example: ```python >>> from transformers import BlipConfig, BlipModel >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration >>> configuration = BlipConfig() >>> # Initializing a BlipPModel (with random weights) from the Salesforce/blip-vqa-base style configuration >>> model = BlipModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig >>> # Initializing a BLIPText and BLIPVision configuration >>> config_text = BlipTextConfig() >>> config_vision = BlipVisionConfig() >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = "blip" def __init__( self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.") self.text_config = BlipTextConfig(**text_config) self.vision_config = BlipVisionConfig(**vision_config) self.text_config.encoder_hidden_size = self.vision_config.hidden_size self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.initializer_range = 0.02 self.image_text_hidden_size = image_text_hidden_size @classmethod def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs): r""" Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model configuration. Returns: [`BlipConfig`]: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
transformers/src/transformers/models/blip/configuration_blip.py/0
{ "file_path": "transformers/src/transformers/models/blip/configuration_blip.py", "repo_id": "transformers", "token_count": 6358 }
314
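As a complement to the docstring examples in the file above, a hedged sketch of overriding sub-config values through plain dicts, which `BlipConfig.__init__` forwards to `BlipTextConfig` and `BlipVisionConfig`; the specific override values are illustrative only.

```python
# Sketch: BlipConfig builds its sub-configs from plain dicts (see __init__ above).
from transformers import BlipConfig

config = BlipConfig(
    text_config={"hidden_size": 512, "num_attention_heads": 8},
    vision_config={"image_size": 224, "patch_size": 16},
    projection_dim=256,
)

# The text encoder_hidden_size is tied to the vision hidden size in __init__.
assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size
print(config.text_config.hidden_size)   # 512
print(config.vision_config.image_size)  # 224
```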
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team and BigScience workshop. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BLOOM model.""" import math import warnings from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from torch.nn import functional as F from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_bloom import BloomConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "bigscience/bloom-560m" _CONFIG_FOR_DOC = "BloomConfig" BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bigscience/bigscience-small-testing", "bigscience/bloom-560m", "bigscience/bloom-1b1", "bigscience/bloom-1b7", "bigscience/bloom-3b", "bigscience/bloom-7b1", "bigscience/bloom", ] def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: """ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. Args: Returns tensor shaped (batch_size * num_heads, 1, max_seq_len) attention_mask (`torch.Tensor`): Token-wise attention mask, this should be of shape (batch_size, max_seq_len). 
num_heads (`int`, *required*): number of heads dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`): dtype of the output tensor """ batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: """ Dropout add function Args: x (`torch.tensor`, *required*): input tensor residual (`torch.tensor`, *required*): residual tensor prob (`float`, *required*): dropout probability training (`bool`, *required*): training mode """ out = F.dropout(x, p=prob, training=training) out = residual + out return out def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor: """ Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to make the model jitable. Args: x (`torch.tensor`, *required*): input hidden states """ return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor: """ gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. 
+ torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) Args: g (`torch.tensor`, *required*): gradient output tensor x (`torch.tensor`, *required*): input tensor """ x = x[0] # x is a tuple of 1 element, needs to unpack it first tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243 ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out) return ff * g class GeLUFunction(torch.autograd.Function): @staticmethod def forward(ctx, input: torch.Tensor) -> torch.Tensor: ctx.save_for_backward(input) return bloom_gelu_forward(input) @staticmethod def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: input = ctx.saved_tensors tmp = bloom_gelu_back(grad_output, input) return tmp class BloomGelu(nn.Module): """ BloomBiasGelu wrapper function that make use of the simple function on inference mode to make the model torchscriptable and use the autograd function in training mode to get the accurate results of the gradients Partly copied from Megatron-DeepSpeed code and adapted for our needs See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329 """ def __init__(self): super().__init__() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.training: return GeLUFunction.apply(x) else: return bloom_gelu_forward(x) class BloomAttention(nn.Module): def __init__(self, config: BloomConfig): super().__init__() self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.hidden_size = config.hidden_size self.num_heads = config.n_head self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" f" {self.num_heads})." 
) # Layer-wise attention scaling self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = 1.0 self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) self.dense = nn.Linear(self.hidden_size, self.hidden_size) self.attention_dropout = nn.Dropout(config.attention_dropout) def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory storage as `fused_qkv` Args: fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] value: [batch_size, seq_length, num_heads, head_dim] """ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: """ Merge heads together over the last dimension Args: x (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim] Returns: torch.tensor: [batch_size, seq_length, num_heads * head_dim] """ # What we want to achieve is: # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim batch_size_and_num_heads, seq_length, _ = x.shape batch_size = batch_size_and_num_heads // self.num_heads # First view to decompose the batch size # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim x = x.permute(0, 2, 1, 3) # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) def forward( self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) batch_size, q_length, _, _ = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) key_layer = key_layer.permute(0, 2, 3, 1).reshape(batch_size * self.num_heads, self.head_dim, q_length) value_layer = value_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) if layer_past is not None: past_key, past_value = layer_past # concatenate along seq_length dimension: # - key: [batch_size * self.num_heads, head_dim, kv_length] # - value: [batch_size * self.num_heads, kv_length, head_dim] key_layer = torch.cat((past_key, key_layer), dim=2) value_layer = torch.cat((past_value, value_layer), dim=1) _, _, kv_length = key_layer.shape if use_cache is True: present = (key_layer, value_layer) else: present = None # [batch_size * num_heads, q_length, kv_length] # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11 matmul_result = alibi.baddbmm( batch1=query_layer, batch2=key_layer, 
beta=self.beta, alpha=self.inv_norm_factor, ) # change view to [batch_size, num_heads, q_length, kv_length] attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length) # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length] input_dtype = attention_scores.dtype # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38` if input_dtype == torch.float16: attention_scores = attention_scores.to(torch.float) attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min) attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype) # [batch_size, num_heads, q_length, kv_length] attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask # change view [batch_size x num_heads, q_length, kv_length] attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, kv_length) # matmul: [batch_size * num_heads, q_length, head_dim] context_layer = torch.bmm(attention_probs_reshaped, value_layer) # change view [batch_size, q_length, num_heads * head_dim] context_layer = self._merge_heads(context_layer) # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232 if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + F.linear( context_layer[:, :, int(i * slices) : int((i + 1) * slices)], self.dense.weight[:, int(i * slices) : int((i + 1) * slices)], ) else: output_tensor = self.dense(context_layer) output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training) outputs = (output_tensor, present) if output_attentions: outputs += (attention_probs,) return outputs class BloomMLP(nn.Module): def __init__(self, config: BloomConfig): super().__init__() hidden_size = config.hidden_size self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.dense_h_to_4h = nn.Linear(hidden_size, 4 * hidden_size) self.gelu_impl = BloomGelu() self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size) self.hidden_dropout = config.hidden_dropout def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states)) if self.pretraining_tp > 1 and self.slow_but_exact: intermediate_output = torch.zeros_like(residual) slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp for i in range(self.pretraining_tp): intermediate_output = intermediate_output + F.linear( hidden_states[:, :, int(i * slices) : int((i + 1) * slices)], self.dense_4h_to_h.weight[:, int(i * slices) : int((i + 1) * slices)], ) else: intermediate_output = self.dense_4h_to_h(hidden_states) output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training) return output class BloomBlock(nn.Module): def __init__(self, config: BloomConfig): super().__init__() hidden_size = config.hidden_size self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.num_heads = config.n_head self.self_attention = BloomAttention(config) self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = BloomMLP(config) 
self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm self.hidden_dropout = config.hidden_dropout def forward( self, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, ): # hidden_states: [batch_size, seq_length, hidden_size] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Layer norm post the self attention. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # Self attention. attn_outputs = self.self_attention( layernorm_output, residual, layer_past=layer_past, attention_mask=attention_mask, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attention_output = attn_outputs[0] outputs = attn_outputs[1:] layernorm_output = self.post_attention_layernorm(attention_output) # Get residual if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output # MLP. output = self.mlp(layernorm_output, residual) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs # hidden_states, present, attentions class BloomPreTrainedModel(PreTrainedModel): config_class = BloomConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["BloomBlock"] _skip_keys_device_placement = "past_key_values" def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @staticmethod def _convert_to_standard_cache( past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: """ Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size, num_heads, ...])) """ batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape num_heads = batch_size_times_num_heads // batch_size # key: [batch_size * num_heads, head_dim, seq_length] -> [batch_size, num_heads, head_dim, seq_length] # value: [batch_size * num_heads, seq_length, head_dim] -> [batch_size, num_heads, seq_length, head_dim] return tuple( ( layer_past[0].view(batch_size, num_heads, head_dim, seq_length), layer_past[1].view(batch_size, num_heads, seq_length, head_dim), ) for layer_past in past_key_value ) @staticmethod def _convert_to_bloom_cache( past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: """ Converts the cache to the format expected by Bloom, i.e. 
to tuple(tuple([batch_size * num_heads, ...])) """ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape batch_size_times_num_heads = batch_size * num_heads # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length] # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim] return tuple( ( layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim), ) for layer_past in past_key_value ) BLOOM_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BloomConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BLOOM_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. Each element of `past_key_values` is a tuple (past_key, past_value): - past_key: [batch_size * num_heads, head_dim, kv_length] - past_value: [batch_size * num_heads, kv_length, head_dim] attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see `past_key_values`). use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.", BLOOM_START_DOCSTRING, ) class BloomModel(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.embed_dim = config.hidden_size self.num_heads = config.n_head # Embedding + LN Embedding self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) # Transformer blocks self.h = nn.ModuleList([BloomBlock(config) for _ in range(config.num_hidden_layers)]) # Final Layer Norm self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: return build_alibi_tensor(attention_mask, num_heads, dtype) def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_key_values = tuple([None] * len(self.h)) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # Compute alibi tensor: check build_alibi_tensor documentation seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype) causal_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape=(batch_size, seq_length), inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) causal_mask = causal_mask.bool() for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, alibi, causal_mask, layer_past, head_mask[i], use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return 
BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @add_start_docstrings( """ The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, BLOOM_START_DOCSTRING, ) class BloomForCausalLM(BloomPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: BloomConfig): super().__init__(config) self.transformer = BloomModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, past_key_values: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs, ) -> dict: # only keep the last tokens of input_ids when a past cache is provided if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only the final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # the cache may be in the standard format (e.g. in contrastive search), convert to bloom's format if needed if past_key_values[0][0].shape[0] == input_ids.shape[0]: past_key_values = self._convert_to_bloom_cache(past_key_values) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`. """ if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length) ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def _reorder_cache( self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. Output shares the same memory storage as `past`. """ standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx)) # Get a copy of `beam_idx` on all the devices where we need those indices. device_to_beam_idx = { past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past } reordered_past = tuple( ( layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]), ) for layer_past in standardized_past ) return self._convert_to_bloom_cache(reordered_past) @add_start_docstrings( """ The Bloom Model transformer with a sequence classification head on top (linear layer). [`BloomForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). 
""", BLOOM_START_DOCSTRING, ) class BloomForSequenceClassification(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = BloomModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds`." ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ Bloom Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BLOOM_START_DOCSTRING, ) class BloomForTokenClassification(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = BloomModel(config) if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`; the cross-entropy loss is computed over all tokens. 
""" if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) ) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BLOOM_START_DOCSTRING, ) class BloomForQuestionAnswering(BloomPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = BloomModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/bloom/modeling_bloom.py/0
{ "file_path": "transformers/src/transformers/models/bloom/modeling_bloom.py", "repo_id": "transformers", "token_count": 23801 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig", "CamembertOnnxConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_camembert"] = ["CamembertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_camembert_fast"] = ["CamembertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_camembert"] = [ "CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "CamembertForCausalLM", "CamembertForMaskedLM", "CamembertForMultipleChoice", "CamembertForQuestionAnswering", "CamembertForSequenceClassification", "CamembertForTokenClassification", "CamembertModel", "CamembertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_camembert"] = [ "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCamembertForCausalLM", "TFCamembertForMaskedLM", "TFCamembertForMultipleChoice", "TFCamembertForQuestionAnswering", "TFCamembertForSequenceClassification", "TFCamembertForTokenClassification", "TFCamembertModel", "TFCamembertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig, CamembertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_camembert import CamembertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_camembert_fast import CamembertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_camembert import ( CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, CamembertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_camembert import ( TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCamembertForCausalLM, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, TFCamembertForQuestionAnswering, TFCamembertForSequenceClassification, TFCamembertForTokenClassification, 
TFCamembertModel, TFCamembertPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
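Because this package registers everything through `_LazyModule`, the torch and TensorFlow submodules listed above are only imported when one of the exported names is first accessed. A short sketch of the effect, assuming sentencepiece and PyTorch are installed and using the public `camembert-base` checkpoint name:

import sys

from transformers.models import camembert  # installs the lazy module; no torch/TF import happens yet

# At this point "transformers.models.camembert.modeling_camembert" is normally absent from sys.modules.
tokenizer = camembert.CamembertTokenizer.from_pretrained("camembert-base")  # sentencepiece tokenizer resolves lazily
model = camembert.CamembertForMaskedLM.from_pretrained("camembert-base")    # first access imports modeling_camembert
print("transformers.models.camembert.modeling_camembert" in sys.modules)    # True once the class has been touched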
transformers/src/transformers/models/camembert/__init__.py/0
{ "file_path": "transformers/src/transformers/models/camembert/__init__.py", "repo_id": "transformers", "token_count": 1824 }
# coding=utf-8 # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Chinese-CLIP model.""" import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16" _CONFIG_FOR_DOC = "ChineseCLIPConfig" CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ "OFA-Sys/chinese-clip-vit-base-patch16", # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip ] # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class ChineseCLIPOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPVisionModel`]. text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPTextModel`]. vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPVisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText class ChineseCLIPTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config 
self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
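# The four branches below cover, in order: cached cross-attention (reuse the encoder key/values),
# fresh cross-attention, cached decoder self-attention (concatenate past key/values with the new
# projections), and plain self-attention with no cache.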
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in ChineseCLIPTextModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText class ChineseCLIPTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type) self.output = ChineseCLIPTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class ChineseCLIPVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if 
self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText class ChineseCLIPTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ChineseCLIPTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute") self.intermediate = ChineseCLIPTextIntermediate(config) self.output = ChineseCLIPTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, 
head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ChineseCLIPVisionLayer(nn.Module): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = ChineseCLIPVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = ChineseCLIPVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ChineseCLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ChineseCLIPConfig base_model_prefix = "chinese_clip" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ChineseCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, ChineseCLIPTextEmbeddings): nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: if embedding.padding_idx is not None: embedding.weight.data[embedding.padding_idx].zero_() elif isinstance(module, ChineseCLIPVisionAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ChineseCLIPModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, 
nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() CHINESE_CLIP_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CHINESE_CLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class ChineseCLIPVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. 
Args: config: ChineseCLIPConfig """ def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ChineseCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = ChineseCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else 
            self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    "The text model from CHINESE_CLIP without any head or projection on top.",
    CHINESE_CLIP_START_DOCSTRING,
)
class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
    pass.
    """

    config_class = ChineseCLIPTextConfig

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = ChineseCLIPTextEmbeddings(config)
        self.encoder = ChineseCLIPTextEncoder(config)

        self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: 
return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """The vision model from CHINESE_CLIP without any head or projection on top.""", CHINESE_CLIP_START_DOCSTRING, ) class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPVisionConfig main_input_name = "pixel_values" def __init__(self, config: ChineseCLIPVisionConfig): super().__init__(config) self.vision_model = ChineseCLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) class ChineseCLIPModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPConfig def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): raise ValueError( "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, ChineseCLIPVisionConfig): raise ValueError( "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) self.vision_model = ChineseCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[0][:, 0, :] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the final [CLS] hidden state of Vision-Transformer. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ChineseCLIPOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = chinese_clip_loss(logits_per_text) if not return_dict: # fix the None pooled_output of text_outputs to conform with dict_output pooled_output = text_outputs[1] if pooled_output is None: text_outputs = (text_outputs[0],) + text_outputs[2:] output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return ChineseCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
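The `forward` pass above computes `logits_per_text` and hands it to the `chinese_clip_loss` helper defined earlier in this file. As a point of reference, the sketch below shows the standard CLIP-style symmetric contrastive loss that such a helper typically implements; the function names here are illustrative only and are not part of the module above.

```python
import torch
import torch.nn.functional as F


def _contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # The i-th text is the positive pair of the i-th image, so the targets are the diagonal indices.
    return F.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def clip_style_loss(similarity: torch.Tensor) -> torch.Tensor:
    # Symmetric cross-entropy over the text->image and image->text directions.
    caption_loss = _contrastive_loss(similarity)
    image_loss = _contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0
```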
transformers/src/transformers/models/chinese_clip/modeling_chinese_clip.py/0
{ "file_path": "transformers/src/transformers/models/chinese_clip/modeling_chinese_clip.py", "repo_id": "transformers", "token_count": 30797 }
317
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ConvBERT checkpoint.""" import argparse from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert from transformers.utils import logging logging.set_verbosity_info() def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path): conf = ConvBertConfig.from_json_file(convbert_config_file) model = ConvBertModel(conf) model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path) model.save_pretrained(pytorch_dump_path) tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True) tf_model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--convbert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ConvBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
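For reference, the converter above can also be driven directly from Python instead of the command line; the three paths below are hypothetical placeholders and need to point at a real TF1 ConvBERT checkpoint, its config JSON, and a writable output directory.

```python
# Hypothetical paths -- substitute your own checkpoint, config, and output locations.
tf_checkpoint_path = "/path/to/convbert_base/model.ckpt"
convbert_config_file = "/path/to/convbert_base/config.json"
pytorch_dump_path = "./convbert-base-converted"

# Writes the converted PyTorch weights and a TF2 re-export into `pytorch_dump_path`.
convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path)
```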
transformers/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py/0
{ "file_path": "transformers/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py", "repo_id": "transformers", "token_count": 749 }
318
# coding=utf-8 # Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ConvNextV2 model.""" from __future__ import annotations from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPooling, TFBaseModelOutputWithPoolingAndNoAttention, TFImageClassifierOutputWithNoAttention, ) from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_convnextv2 import ConvNextV2Config logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ConvNextV2Config" # Base docstring _CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224" _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/convnextv2-tiny-1k-224", # See all ConvNextV2 models at https://huggingface.co/models?filter=convnextv2 ] # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2 class TFConvNextV2DropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
References: (1) github.com:rwightman/pytorch-image-models """ def __init__(self, drop_path: float, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x: tf.Tensor, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x class TFConvNextV2GRN(keras.layers.Layer): """GRN (Global Response Normalization) layer""" def __init__(self, config: ConvNextV2Config, dim: int, **kwargs): super().__init__(**kwargs) self.dim = dim def build(self, input_shape: tf.TensorShape = None): # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa) self.weight = self.add_weight( name="weight", shape=(1, 1, 1, self.dim), initializer=keras.initializers.Zeros(), ) self.bias = self.add_weight( name="bias", shape=(1, 1, 1, self.dim), initializer=keras.initializers.Zeros(), ) return super().build(input_shape) def call(self, hidden_states: tf.Tensor): global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True) norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6) hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states return hidden_states # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2 class TFConvNextV2Embeddings(keras.layers.Layer): """This class is comparable to (and inspired by) the SwinEmbeddings class found in src/transformers/models/swin/modeling_swin.py. """ def __init__(self, config: ConvNextV2Config, **kwargs): super().__init__(**kwargs) self.patch_embeddings = keras.layers.Conv2D( filters=config.hidden_sizes[0], kernel_size=config.patch_size, strides=config.patch_size, name="patch_embeddings", kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), ) self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm") self.num_channels = config.num_channels self.config = config def call(self, pixel_values): if isinstance(pixel_values, dict): pixel_values = pixel_values["pixel_values"] tf.debugging.assert_equal( shape_list(pixel_values)[1], self.num_channels, message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.", ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) embeddings = self.patch_embeddings(pixel_values) embeddings = self.layernorm(embeddings) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build([None, None, None, self.config.num_channels]) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, None, self.config.hidden_sizes[0]]) class TFConvNextV2Layer(keras.layers.Layer): """This corresponds to the `Block` class in the original implementation. 
    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in
    (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Since we already permuted the inputs to follow NHWC ordering, we can just apply the operations straight-away
    without the permutation.

    Args:
        config (`ConvNextV2Config`): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`, defaults to 0.0): Stochastic depth rate.
    """

    def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        self.dwconv = keras.layers.Conv2D(
            filters=dim,
            kernel_size=7,
            padding="same",
            groups=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer=keras.initializers.Zeros(),
            name="dwconv",
        )  # depthwise conv
        self.layernorm = keras.layers.LayerNormalization(
            epsilon=1e-6,
            name="layernorm",
        )
        self.pwconv1 = keras.layers.Dense(
            units=4 * dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer=keras.initializers.Zeros(),
            name="pwconv1",
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = get_tf_activation(config.hidden_act)
        self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn")
        self.pwconv2 = keras.layers.Dense(
            units=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer=keras.initializers.Zeros(),
            name="pwconv2",
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
        self.drop_path = (
            TFConvNextV2DropPath(drop_path, name="drop_path")
            if drop_path > 0.0
            else keras.layers.Activation("linear", name="drop_path")
        )

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        x = self.drop_path(x, training=training)
        x = input + x
        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dwconv", None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, "pwconv1", None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, "grn", None) is not None:
            with tf.name_scope(self.grn.name):
                self.grn.build(None)
        if getattr(self, "pwconv2", None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, "drop_path", None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)


# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2
class TFConvNextV2Stage(keras.layers.Layer):
    """ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config (`ConvNextV2Config`): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
""" def __init__( self, config: ConvNextV2Config, in_channels: int, out_channels: int, kernel_size: int = 2, stride: int = 2, depth: int = 2, drop_path_rates: Optional[List[float]] = None, **kwargs, ): super().__init__(**kwargs) if in_channels != out_channels or stride > 1: self.downsampling_layer = [ keras.layers.LayerNormalization( epsilon=1e-6, name="downsampling_layer.0", ), # Inputs to this layer will follow NHWC format since we # transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings` # layer. All the outputs throughout the model will be in NHWC # from this point on until the output where we again change to # NCHW. keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name="downsampling_layer.1", ), ] else: self.downsampling_layer = [tf.identity] drop_path_rates = drop_path_rates or [0.0] * depth self.layers = [ TFConvNextV2Layer( config, dim=out_channels, drop_path=drop_path_rates[j], name=f"layers.{j}", ) for j in range(depth) ] self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def call(self, hidden_states): for layer in self.downsampling_layer: hidden_states = layer(hidden_states) for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) if self.in_channels != self.out_channels or self.stride > 1: with tf.name_scope(self.downsampling_layer[0].name): self.downsampling_layer[0].build([None, None, None, self.in_channels]) with tf.name_scope(self.downsampling_layer[1].name): self.downsampling_layer[1].build([None, None, None, self.in_channels]) class TFConvNextV2Encoder(keras.layers.Layer): def __init__(self, config: ConvNextV2Config, **kwargs): super().__init__(**kwargs) self.stages = [] drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths)) drop_path_rates = tf.split(drop_path_rates, config.depths) drop_path_rates = [x.numpy().tolist() for x in drop_path_rates] prev_chs = config.hidden_sizes[0] for i in range(config.num_stages): out_chs = config.hidden_sizes[i] stage = TFConvNextV2Stage( config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], name=f"stages.{i}", ) self.stages.append(stage) prev_chs = out_chs def call( self, hidden_states: tf.Tensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]: all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) def build(self, input_shape=None): for stage in self.stages: with tf.name_scope(stage.name): stage.build(None) @keras_serializable class TFConvNextV2MainLayer(keras.layers.Layer): config_class = ConvNextV2Config def __init__(self, config: ConvNextV2Config, **kwargs): super().__init__(**kwargs) 
self.config = config self.embeddings = TFConvNextV2Embeddings(config, name="embeddings") self.encoder = TFConvNextV2Encoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") # We are setting the `data_format` like so because from here on we will revert to the # NCHW output format self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_last") @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) last_hidden_state = encoder_outputs[0] # Change to NCHW output format have uniformity in the modules pooled_output = self.pooler(last_hidden_state) last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2)) pooled_output = self.layernorm(pooled_output) # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: hidden_states = hidden_states if output_hidden_states else () return (last_hidden_state, pooled_output) + hidden_states return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, self.config.hidden_sizes[-1]]) class TFConvNextV2PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ConvNextV2Config base_model_prefix = "convnextv2" main_input_name = "pixel_values" CONVNEXTV2_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ CONVNEXTV2_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to `True`. 
""" @add_start_docstrings( "The bare ConvNextV2 model outputting raw features without any specific head on top.", CONVNEXTV2_START_DOCSTRING, ) class TFConvNextV2Model(TFConvNextV2PreTrainedModel): def __init__(self, config: ConvNextV2Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2") @unpack_inputs @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") outputs = self.convnextv2( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs[:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convnextv2", None) is not None: with tf.name_scope(self.convnextv2.name): self.convnextv2.build(None) @add_start_docstrings( """ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, CONVNEXTV2_START_DOCSTRING, ) class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ConvNextV2Config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name="classifier", ) @unpack_inputs @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") outputs = self.convnextv2( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convnextv2", None) is not None: with tf.name_scope(self.convnextv2.name): self.convnextv2.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_sizes[-1]])
transformers/src/transformers/models/convnextv2/modeling_tf_convnextv2.py/0
{ "file_path": "transformers/src/transformers/models/convnextv2/modeling_tf_convnextv2.py", "repo_id": "transformers", "token_count": 11969 }
319
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch CvT model.""" import collections.abc from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ImageClassifierOutputWithNoAttention, ModelOutput from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import logging from .configuration_cvt import CvtConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "CvtConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/cvt-13" _EXPECTED_OUTPUT_SHAPE = [1, 384, 14, 14] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/cvt-13" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" CVT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/cvt-13", "microsoft/cvt-13-384", "microsoft/cvt-13-384-22k", "microsoft/cvt-21", "microsoft/cvt-21-384", "microsoft/cvt-21-384-22k", # See all Cvt models at https://huggingface.co/models?filter=cvt ] @dataclass class BaseModelOutputWithCLSToken(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. cls_token_value (`torch.FloatTensor` of shape `(batch_size, 1, hidden_size)`): Classification token at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. """ last_hidden_state: torch.FloatTensor = None cls_token_value: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath class CvtDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class CvtEmbeddings(nn.Module): """ Construct the CvT embeddings. """ def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate): super().__init__() self.convolution_embeddings = CvtConvEmbeddings( patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding ) self.dropout = nn.Dropout(dropout_rate) def forward(self, pixel_values): hidden_state = self.convolution_embeddings(pixel_values) hidden_state = self.dropout(hidden_state) return hidden_state class CvtConvEmbeddings(nn.Module): """ Image to Conv Embedding. """ def __init__(self, patch_size, num_channels, embed_dim, stride, padding): super().__init__() patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) self.patch_size = patch_size self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=stride, padding=padding) self.normalization = nn.LayerNorm(embed_dim) def forward(self, pixel_values): pixel_values = self.projection(pixel_values) batch_size, num_channels, height, width = pixel_values.shape hidden_size = height * width # rearrange "b c h w -> b (h w) c" pixel_values = pixel_values.view(batch_size, num_channels, hidden_size).permute(0, 2, 1) if self.normalization: pixel_values = self.normalization(pixel_values) # rearrange "b (h w) c" -> b c h w" pixel_values = pixel_values.permute(0, 2, 1).view(batch_size, num_channels, height, width) return pixel_values class CvtSelfAttentionConvProjection(nn.Module): def __init__(self, embed_dim, kernel_size, padding, stride): super().__init__() self.convolution = nn.Conv2d( embed_dim, embed_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=False, groups=embed_dim, ) self.normalization = nn.BatchNorm2d(embed_dim) def forward(self, hidden_state): hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state) return hidden_state class CvtSelfAttentionLinearProjection(nn.Module): def forward(self, hidden_state): batch_size, num_channels, height, width = hidden_state.shape hidden_size = height * width # rearrange " b c h w -> b (h w) c" hidden_state = hidden_state.view(batch_size, num_channels, hidden_size).permute(0, 2, 1) return hidden_state class CvtSelfAttentionProjection(nn.Module): def __init__(self, embed_dim, kernel_size, padding, stride, projection_method="dw_bn"): super().__init__() if projection_method == "dw_bn": self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, 
stride) self.linear_projection = CvtSelfAttentionLinearProjection() def forward(self, hidden_state): hidden_state = self.convolution_projection(hidden_state) hidden_state = self.linear_projection(hidden_state) return hidden_state class CvtSelfAttention(nn.Module): def __init__( self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token=True, **kwargs, ): super().__init__() self.scale = embed_dim**-0.5 self.with_cls_token = with_cls_token self.embed_dim = embed_dim self.num_heads = num_heads self.convolution_projection_query = CvtSelfAttentionProjection( embed_dim, kernel_size, padding_q, stride_q, projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method, ) self.convolution_projection_key = CvtSelfAttentionProjection( embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method ) self.convolution_projection_value = CvtSelfAttentionProjection( embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method ) self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias) self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias) self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias) self.dropout = nn.Dropout(attention_drop_rate) def rearrange_for_multi_head_attention(self, hidden_state): batch_size, hidden_size, _ = hidden_state.shape head_dim = self.embed_dim // self.num_heads # rearrange 'b t (h d) -> b h t d' return hidden_state.view(batch_size, hidden_size, self.num_heads, head_dim).permute(0, 2, 1, 3) def forward(self, hidden_state, height, width): if self.with_cls_token: cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1) batch_size, hidden_size, num_channels = hidden_state.shape # rearrange "b (h w) c -> b c h w" hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width) key = self.convolution_projection_key(hidden_state) query = self.convolution_projection_query(hidden_state) value = self.convolution_projection_value(hidden_state) if self.with_cls_token: query = torch.cat((cls_token, query), dim=1) key = torch.cat((cls_token, key), dim=1) value = torch.cat((cls_token, value), dim=1) head_dim = self.embed_dim // self.num_heads query = self.rearrange_for_multi_head_attention(self.projection_query(query)) key = self.rearrange_for_multi_head_attention(self.projection_key(key)) value = self.rearrange_for_multi_head_attention(self.projection_value(value)) attention_score = torch.einsum("bhlk,bhtk->bhlt", [query, key]) * self.scale attention_probs = torch.nn.functional.softmax(attention_score, dim=-1) attention_probs = self.dropout(attention_probs) context = torch.einsum("bhlt,bhtv->bhlv", [attention_probs, value]) # rearrange"b h t d -> b t (h d)" _, _, hidden_size, _ = context.shape context = context.permute(0, 2, 1, 3).contiguous().view(batch_size, hidden_size, self.num_heads * head_dim) return context class CvtSelfOutput(nn.Module): """ The residual connection is defined in CvtLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, embed_dim, drop_rate): super().__init__() self.dense = nn.Linear(embed_dim, embed_dim) self.dropout = nn.Dropout(drop_rate) def forward(self, hidden_state, input_tensor): hidden_state = self.dense(hidden_state) hidden_state = self.dropout(hidden_state) return hidden_state class CvtAttention(nn.Module): def __init__( self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token=True, ): super().__init__() self.attention = CvtSelfAttention( num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token, ) self.output = CvtSelfOutput(embed_dim, drop_rate) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_state, height, width): self_output = self.attention(hidden_state, height, width) attention_output = self.output(self_output, hidden_state) return attention_output class CvtIntermediate(nn.Module): def __init__(self, embed_dim, mlp_ratio): super().__init__() self.dense = nn.Linear(embed_dim, int(embed_dim * mlp_ratio)) self.activation = nn.GELU() def forward(self, hidden_state): hidden_state = self.dense(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state class CvtOutput(nn.Module): def __init__(self, embed_dim, mlp_ratio, drop_rate): super().__init__() self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim) self.dropout = nn.Dropout(drop_rate) def forward(self, hidden_state, input_tensor): hidden_state = self.dense(hidden_state) hidden_state = self.dropout(hidden_state) hidden_state = hidden_state + input_tensor return hidden_state class CvtLayer(nn.Module): """ CvtLayer composed by attention layers, normalization and multi-layer perceptrons (mlps). 
""" def __init__( self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, mlp_ratio, drop_path_rate, with_cls_token=True, ): super().__init__() self.attention = CvtAttention( num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token, ) self.intermediate = CvtIntermediate(embed_dim, mlp_ratio) self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate) self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_before = nn.LayerNorm(embed_dim) self.layernorm_after = nn.LayerNorm(embed_dim) def forward(self, hidden_state, height, width): self_attention_output = self.attention( self.layernorm_before(hidden_state), # in Cvt, layernorm is applied before self-attention height, width, ) attention_output = self_attention_output attention_output = self.drop_path(attention_output) # first residual connection hidden_state = attention_output + hidden_state # in Cvt, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_state) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_state) layer_output = self.drop_path(layer_output) return layer_output class CvtStage(nn.Module): def __init__(self, config, stage): super().__init__() self.config = config self.stage = stage if self.config.cls_token[self.stage]: self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1])) self.embedding = CvtEmbeddings( patch_size=config.patch_sizes[self.stage], stride=config.patch_stride[self.stage], num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1], embed_dim=config.embed_dim[self.stage], padding=config.patch_padding[self.stage], dropout_rate=config.drop_rate[self.stage], ) drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage])] self.layers = nn.Sequential( *[ CvtLayer( num_heads=config.num_heads[self.stage], embed_dim=config.embed_dim[self.stage], kernel_size=config.kernel_qkv[self.stage], padding_q=config.padding_q[self.stage], padding_kv=config.padding_kv[self.stage], stride_kv=config.stride_kv[self.stage], stride_q=config.stride_q[self.stage], qkv_projection_method=config.qkv_projection_method[self.stage], qkv_bias=config.qkv_bias[self.stage], attention_drop_rate=config.attention_drop_rate[self.stage], drop_rate=config.drop_rate[self.stage], drop_path_rate=drop_path_rates[self.stage], mlp_ratio=config.mlp_ratio[self.stage], with_cls_token=config.cls_token[self.stage], ) for _ in range(config.depth[self.stage]) ] ) def forward(self, hidden_state): cls_token = None hidden_state = self.embedding(hidden_state) batch_size, num_channels, height, width = hidden_state.shape # rearrange b c h w -> b (h w) c" hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1) if self.config.cls_token[self.stage]: cls_token = self.cls_token.expand(batch_size, -1, -1) hidden_state = torch.cat((cls_token, hidden_state), dim=1) for layer in self.layers: layer_outputs = layer(hidden_state, height, width) hidden_state = layer_outputs if self.config.cls_token[self.stage]: cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1) hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width) return 
hidden_state, cls_token class CvtEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.stages = nn.ModuleList([]) for stage_idx in range(len(config.depth)): self.stages.append(CvtStage(config, stage_idx)) def forward(self, pixel_values, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None hidden_state = pixel_values cls_token = None for _, (stage_module) in enumerate(self.stages): hidden_state, cls_token = stage_module(hidden_state) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None) return BaseModelOutputWithCLSToken( last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states, ) class CvtPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CvtConfig base_model_prefix = "cvt" main_input_name = "pixel_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, CvtStage): if self.config.cls_token[module.stage]: module.cls_token.data = nn.init.trunc_normal_( torch.zeros(1, 1, self.config.embed_dim[-1]), mean=0.0, std=self.config.initializer_range ) CVT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`CvtConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CVT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.", CVT_START_DOCSTRING, ) class CvtModel(CvtPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.encoder = CvtEncoder(config) self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithCLSToken]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") encoder_outputs = self.encoder( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCLSToken( last_hidden_state=sequence_output, cls_token_value=encoder_outputs.cls_token_value, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( """ Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. """, CVT_START_DOCSTRING, ) class CvtForImageClassification(CvtPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.cvt = CvtModel(config, add_pooling_layer=False) self.layernorm = nn.LayerNorm(config.embed_dim[-1]) # Classifier head self.classifier = ( nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.cvt( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] cls_token = outputs[1] if self.config.cls_token[-1]: sequence_output = self.layernorm(cls_token) else: batch_size, num_channels, height, width = sequence_output.shape # rearrange "b c h w -> b (h w) c" sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1) sequence_output = self.layernorm(sequence_output) sequence_output_mean = sequence_output.mean(dim=1) logits = self.classifier(sequence_output_mean) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
transformers/src/transformers/models/cvt/modeling_cvt.py/0
{ "file_path": "transformers/src/transformers/models/cvt/modeling_cvt.py", "repo_id": "transformers", "token_count": 12594 }
320
# coding=utf-8 # Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 DeBERTa model.""" from __future__ import annotations import math from typing import Dict, Optional, Sequence, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_deberta import DebertaConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DebertaConfig" _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base" TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "kamalkraj/deberta-base", # See all DeBERTa models at https://huggingface.co/models?filter=DeBERTa ] class TFDebertaContextPooler(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense") self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout") self.config = config def call(self, hidden_states, training: bool = False): # We "pool" the model by simply taking the hidden state corresponding # to the first token. context_token = hidden_states[:, 0] context_token = self.dropout(context_token, training=training) pooled_output = self.dense(context_token) pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output) return pooled_output @property def output_dim(self) -> int: return self.config.hidden_size def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.pooler_hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) class TFDebertaXSoftmax(keras.layers.Layer): """ Masked Softmax which is optimized for saving memory Args: input (`tf.Tensor`): The input tensor that will apply softmax. mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation. 
dim (int): The dimension that will apply softmax """ def __init__(self, axis=-1, **kwargs): super().__init__(**kwargs) self.axis = axis def call(self, inputs: tf.Tensor, mask: tf.Tensor): rmask = tf.logical_not(tf.cast(mask, tf.bool)) output = tf.where(rmask, float("-inf"), inputs) output = stable_softmax(output, self.axis) output = tf.where(rmask, 0.0, output) return output class TFDebertaStableDropout(keras.layers.Layer): """ Optimized dropout module for stabilizing the training Args: drop_prob (float): the dropout probabilities """ def __init__(self, drop_prob, **kwargs): super().__init__(**kwargs) self.drop_prob = drop_prob @tf.custom_gradient def xdropout(self, inputs): """ Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob. """ mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool, ) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) if self.drop_prob > 0: inputs = tf.where(mask, 0.0, inputs) * scale def grad(upstream): if self.drop_prob > 0: return tf.where(mask, 0.0, upstream) * scale else: return upstream return inputs, grad def call(self, inputs: tf.Tensor, training: tf.Tensor = False): if training: return self.xdropout(inputs) return inputs class TFDebertaLayerNorm(keras.layers.Layer): """LayerNorm module in the TF style (epsilon inside the square root).""" def __init__(self, size, eps=1e-12, **kwargs): super().__init__(**kwargs) self.size = size self.eps = eps def build(self, input_shape): self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight") self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias") return super().build(input_shape) def call(self, x: tf.Tensor) -> tf.Tensor: mean = tf.reduce_mean(x, axis=[-1], keepdims=True) variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True) std = tf.math.sqrt(variance + self.eps) return self.gamma * (x - mean) / std + self.beta class TFDebertaSelfOutput(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, name="dense") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout") self.config = config def call(self, hidden_states, input_tensor, training: bool = False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) class TFDebertaAttention(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.self = TFDebertaDisentangledSelfAttention(config, name="self") self.dense_output = TFDebertaSelfOutput(config, name="output") self.config = config def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, query_states: 
tf.Tensor = None, relative_pos: tf.Tensor = None, rel_embeddings: tf.Tensor = None, output_attentions: bool = False, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self( hidden_states=input_tensor, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training, ) if query_states is None: query_states = input_tensor attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=query_states, training=training ) output = (attention_output,) + self_outputs[1:] return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFDebertaIntermediate(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFDebertaOutput(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout") self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) class TFDebertaLayer(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.attention = TFDebertaAttention(config, name="attention") self.intermediate = TFDebertaIntermediate(config, name="intermediate") self.bert_output = TFDebertaOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor = None, relative_pos: tf.Tensor = None, rel_embeddings: tf.Tensor = None, output_attentions: bool = False, training: bool = False, ) -> 
Tuple[tf.Tensor]: attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training, ) attention_output = attention_outputs[0] intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) class TFDebertaEncoder(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] self.relative_attention = getattr(config, "relative_attention", False) self.config = config if self.relative_attention: self.max_relative_positions = getattr(config, "max_relative_positions", -1) if self.max_relative_positions < 1: self.max_relative_positions = config.max_position_embeddings def build(self, input_shape=None): if self.built: return self.built = True if self.relative_attention: self.rel_embeddings = self.add_weight( name="rel_embeddings.weight", shape=[self.max_relative_positions * 2, self.config.hidden_size], initializer=get_initializer(self.config.initializer_range), ) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) def get_rel_embedding(self): rel_embeddings = self.rel_embeddings if self.relative_attention else None return rel_embeddings def get_attention_mask(self, attention_mask): if len(shape_list(attention_mask)) <= 2: extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2) attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1) attention_mask = tf.cast(attention_mask, tf.uint8) elif len(shape_list(attention_mask)) == 3: attention_mask = tf.expand_dims(attention_mask, 1) return attention_mask def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): if self.relative_attention and relative_pos is None: q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2] relative_pos = build_relative_position(q, shape_list(hidden_states)[-2]) return relative_pos def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor = None, relative_pos: tf.Tensor = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None attention_mask = self.get_attention_mask(attention_mask) relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) if isinstance(hidden_states, Sequence): next_kv = hidden_states[0] else: next_kv = hidden_states rel_embeddings = self.get_rel_embedding() for i, layer_module in 
enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=next_kv, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if query_states is not None: query_states = hidden_states if isinstance(hidden_states, Sequence): next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None else: next_kv = hidden_states if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build_relative_position(query_size, key_size): """ Build relative position according to the query and key We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q - P_k\\) Args: query_size (int): the length of query key_size (int): the length of key Return: `tf.Tensor`: A tensor with shape [1, query_size, key_size] """ q_ids = tf.range(query_size, dtype=tf.int32) k_ids = tf.range(key_size, dtype=tf.int32) rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1]) rel_pos_ids = rel_pos_ids[:query_size, :] rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0) return tf.cast(rel_pos_ids, tf.int64) def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos): shapes = [ shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(query_layer)[2], shape_list(relative_pos)[-1], ] return tf.broadcast_to(c2p_pos, shapes) def p2c_dynamic_expand(c2p_pos, query_layer, key_layer): shapes = [ shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(key_layer)[-2], shape_list(key_layer)[-2], ] return tf.broadcast_to(c2p_pos, shapes) def pos_dynamic_expand(pos_index, p2c_att, key_layer): shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]] return tf.broadcast_to(pos_index, shapes) def torch_gather(x, indices, gather_axis): if gather_axis < 0: gather_axis = tf.rank(x) + gather_axis if gather_axis != tf.rank(x) - 1: pre_roll = tf.rank(x) - 1 - gather_axis permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0) x = tf.transpose(x, perm=permutation) indices = tf.transpose(indices, perm=permutation) else: pre_roll = 0 flat_x = tf.reshape(x, (-1, tf.shape(x)[-1])) flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1])) gathered = tf.gather(flat_x, flat_indices, batch_dims=1) gathered = tf.reshape(gathered, tf.shape(indices)) if pre_roll != 0: permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0) gathered = tf.transpose(gathered, perm=permutation) return gathered class TFDebertaDisentangledSelfAttention(keras.layers.Layer): """ Disentangled self-attention module Parameters: config (`str`): A model config class instance with the configuration to build a new model. 
The schema is similar to *BertConfig*, for more details, please refer [`DebertaConfig`] """ def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.in_proj = keras.layers.Dense( self.all_head_size * 3, kernel_initializer=get_initializer(config.initializer_range), name="in_proj", use_bias=False, ) self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else [] self.relative_attention = getattr(config, "relative_attention", False) self.talking_head = getattr(config, "talking_head", False) if self.talking_head: self.head_logits_proj = keras.layers.Dense( self.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), name="head_logits_proj", use_bias=False, ) self.head_weights_proj = keras.layers.Dense( self.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), name="head_weights_proj", use_bias=False, ) self.softmax = TFDebertaXSoftmax(axis=-1) if self.relative_attention: self.max_relative_positions = getattr(config, "max_relative_positions", -1) if self.max_relative_positions < 1: self.max_relative_positions = config.max_position_embeddings self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout") if "c2p" in self.pos_att_type: self.pos_proj = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_proj", use_bias=False, ) if "p2c" in self.pos_att_type: self.pos_q_proj = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj" ) self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout") self.config = config def build(self, input_shape=None): if self.built: return self.built = True self.q_bias = self.add_weight( name="q_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros() ) self.v_bias = self.add_weight( name="v_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros() ) if getattr(self, "in_proj", None) is not None: with tf.name_scope(self.in_proj.name): self.in_proj.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "head_logits_proj", None) is not None: with tf.name_scope(self.head_logits_proj.name): self.head_logits_proj.build(None) if getattr(self, "head_weights_proj", None) is not None: with tf.name_scope(self.head_weights_proj.name): self.head_weights_proj.build(None) if getattr(self, "pos_dropout", None) is not None: with tf.name_scope(self.pos_dropout.name): self.pos_dropout.build(None) if getattr(self, "pos_proj", None) is not None: with tf.name_scope(self.pos_proj.name): self.pos_proj.build([self.config.hidden_size]) if getattr(self, "pos_q_proj", None) is not None: with tf.name_scope(self.pos_q_proj.name): self.pos_q_proj.build([self.config.hidden_size]) def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor: shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1] # Reshape from [batch_size, seq_length, all_head_size] to 
[batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=shape) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor = None, relative_pos: tf.Tensor = None, rel_embeddings: tf.Tensor = None, output_attentions: bool = False, training: bool = False, ) -> Tuple[tf.Tensor]: """ Call the module Args: hidden_states (`tf.Tensor`): Input states to the module usually the output from previous layer, it will be the Q,K and V in *Attention(Q,K,V)* attention_mask (`tf.Tensor`): An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j* th token. return_att (`bool`, optional): Whether return the attention matrix. query_states (`tf.Tensor`, optional): The *Q* state in *Attention(Q,K,V)*. relative_pos (`tf.Tensor`): The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with values ranging in [*-max_relative_positions*, *max_relative_positions*]. rel_embeddings (`tf.Tensor`): The embedding of relative distances. It's a tensor of shape [\\(2 \\times \\text{max_relative_positions}\\), *hidden_size*]. """ if query_states is None: qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1) query_layer, key_layer, value_layer = tf.split( self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1 ) else: def linear(w, b, x): out = tf.matmul(x, w, transpose_b=True) if b is not None: out += tf.transpose(b) return out ws = tf.split( tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0 ) qkvw = tf.TensorArray(dtype=tf.float32, size=3) for k in tf.range(3): qkvw_inside = tf.TensorArray(dtype=tf.float32, size=self.num_attention_heads) for i in tf.range(self.num_attention_heads): qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k]) qkvw = qkvw.write(k, qkvw_inside.concat()) qkvb = [None] * 3 q = linear(qkvw[0], qkvb[0], query_states) k = linear(qkvw[1], qkvb[1], hidden_states) v = linear(qkvw[2], qkvb[2], hidden_states) query_layer = self.transpose_for_scores(q) key_layer = self.transpose_for_scores(k) value_layer = self.transpose_for_scores(v) query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :]) rel_att = None # Take the dot product between "query" and "key" to get the raw attention scores. 
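        # Note (added comment): DeBERTa's attention score is the sum of up to
        # `1 + len(pos_att_type)` disentangled terms (content-to-content, plus the optional
        # content-to-position and position-to-content biases computed below), so the usual
        # 1/sqrt(d) scaling is widened here to 1/sqrt(d * scale_factor) to keep the summed
        # scores at a comparable magnitude.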
scale_factor = 1 + len(self.pos_att_type) scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor) query_layer = query_layer / scale attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2])) if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings, training=training) rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor) if rel_att is not None: attention_scores = attention_scores + rel_att if self.talking_head: attention_scores = tf.transpose( self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2] ) attention_probs = self.softmax(attention_scores, attention_mask) attention_probs = self.dropout(attention_probs, training=training) if self.talking_head: attention_probs = tf.transpose( self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2] ) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) context_layer_shape = shape_list(context_layer) # Set the final dimension here explicitly. # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput # requires final input dimension to be defined new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): if relative_pos is None: q = shape_list(query_layer)[-2] relative_pos = build_relative_position(q, shape_list(key_layer)[-2]) shape_list_pos = shape_list(relative_pos) if len(shape_list_pos) == 2: relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0) elif len(shape_list_pos) == 3: relative_pos = tf.expand_dims(relative_pos, 1) # bxhxqxk elif len(shape_list_pos) != 4: raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. 
{len(shape_list_pos)}") att_span = tf.cast( tf.minimum( tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions ), tf.int64, ) rel_embeddings = tf.expand_dims( rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0 ) score = 0 # content->position if "c2p" in self.pos_att_type: pos_key_layer = self.pos_proj(rel_embeddings) pos_key_layer = self.transpose_for_scores(pos_key_layer) c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2])) c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1) c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1) score += c2p_att # position->content if "p2c" in self.pos_att_type: pos_query_layer = self.pos_q_proj(rel_embeddings) pos_query_layer = self.transpose_for_scores(pos_query_layer) pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=tf.float32)) if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]: r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2]) else: r_pos = relative_pos p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1) p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2])) p2c_att = tf.transpose( torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2] ) if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]: pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1) p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2) score += p2c_att return score class TFDebertaEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = getattr(config, "embedding_size", config.hidden_size) self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.position_biased_input = getattr(config, "position_biased_input", True) self.initializer_range = config.initializer_range if self.embedding_size != config.hidden_size: self.embed_proj = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="embed_proj", use_bias=False, ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout") def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): if self.config.type_vocab_size > 0: self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) else: self.token_type_embeddings = None with tf.name_scope("position_embeddings"): if self.position_biased_input: self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) else: self.position_embeddings = None if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, 
self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "embed_proj", None) is not None: with tf.name_scope(self.embed_proj.name): self.embed_proj.build([None, None, self.embedding_size]) def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, mask: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ if input_ids is None and inputs_embeds is None: raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) final_embeddings = inputs_embeds if self.position_biased_input: position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) final_embeddings += position_embeds if self.config.type_vocab_size > 0: token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings += token_type_embeds if self.embedding_size != self.hidden_size: final_embeddings = self.embed_proj(final_embeddings) final_embeddings = self.LayerNorm(final_embeddings) if mask is not None: if len(shape_list(mask)) != len(shape_list(final_embeddings)): if len(shape_list(mask)) == 4: mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1) mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32) final_embeddings = final_embeddings * mask final_embeddings = self.dropout(final_embeddings, training=training) return final_embeddings class TFDebertaPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.embedding_size = getattr(config, "embedding_size", config.hidden_size) self.dense = keras.layers.Dense( units=self.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.embedding_size]) class TFDebertaLMPredictionHead(keras.layers.Layer): def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = getattr(config, "embedding_size", config.hidden_size) self.transform = TFDebertaPredictionHeadTransform(config, name="transform") # The output weights are the same 
as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states class TFDebertaOnlyMLMHead(keras.layers.Layer): def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) # @keras_serializable class TFDebertaMainLayer(keras.layers.Layer): config_class = DebertaConfig def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFDebertaEmbeddings(config, name="embeddings") self.encoder = TFDebertaEncoder(config, name="encoder") def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, mask=attention_mask, training=training, ) encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return TFBaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) class TFDebertaPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DebertaConfig base_model_prefix = "deberta" DEBERTA_START_DOCSTRING = r""" The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data. This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`DebertaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DEBERTA_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput``] instead of a plain tuple. 
""" @add_start_docstrings( "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.", DEBERTA_START_DOCSTRING, ) class TFDebertaModel(TFDebertaPreTrainedModel): def __init__(self, config: DebertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.deberta = TFDebertaMainLayer(config, name="deberta") @unpack_inputs @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.deberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "deberta", None) is not None: with tf.name_scope(self.deberta.name): self.deberta.build(None) @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config: DebertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.deberta = TFDebertaMainLayer(config, name="deberta") self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls") def get_lm_head(self) -> keras.layers.Layer: return self.mlm.predictions @unpack_inputs @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.deberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "deberta", None) is not None: with tf.name_scope(self.deberta.name): self.deberta.build(None) if getattr(self, "mlm", None) is not None: with tf.name_scope(self.mlm.name): self.mlm.build(None) @add_start_docstrings( """ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, DEBERTA_START_DOCSTRING, ) class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: DebertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.deberta = TFDebertaMainLayer(config, name="deberta") self.pooler = TFDebertaContextPooler(config, name="pooler") drop_out = getattr(config, "cls_dropout", None) drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout") self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.output_dim = self.pooler.output_dim @unpack_inputs @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" outputs = self.deberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] pooled_output = self.pooler(sequence_output, training=training) pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "deberta", None) is not None: with tf.name_scope(self.deberta.name): self.deberta.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.output_dim]) @add_start_docstrings( """ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, DEBERTA_START_DOCSTRING, ) class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config: DebertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.deberta = TFDebertaMainLayer(config, name="deberta") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" outputs = self.deberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "deberta", None) is not None: with tf.name_scope(self.deberta.name): self.deberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, DEBERTA_START_DOCSTRING, ) class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config: DebertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.deberta = TFDebertaMainLayer(config, name="deberta") self.qa_outputs = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.deberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "deberta", None) is not None: with tf.name_scope(self.deberta.name): self.deberta.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size])
transformers/src/transformers/models/deberta/modeling_tf_deberta.py/0
{ "file_path": "transformers/src/transformers/models/deberta/modeling_tf_deberta.py", "repo_id": "transformers", "token_count": 30749 }
321
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch M-CTC-T model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ....activations import ACT2FN from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ....integrations.deepspeed import is_deepspeed_zero3_enabled from ....modeling_attn_mask_utils import _prepare_4d_attention_mask from ....modeling_outputs import BaseModelOutput, CausalLMOutput from ....modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ....utils import logging from .configuration_mctct import MCTCTConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 _CONFIG_FOR_DOC = "MCTCTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "speechbrain/m-ctc-t-large" _EXPECTED_OUTPUT_SHAPE = [1, 195, 1536] # CTC docstring _CTC_EXPECTED_OUTPUT = '"Mr. Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."' _CTC_EXPECTED_LOSS = 1885.65 MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "speechbrain/m-ctc-t-large", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct ] class MCTCTConv1dSubsampler(nn.Module): """ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) """ def __init__(self, config): super().__init__() self.config = config self.glu_dim = config.conv_glu_dim self.dropout = nn.Dropout(config.conv_dropout) self.num_layers = config.num_conv_layers self.in_channels = config.input_feat_per_channel * config.input_channels if self.num_layers > 1: if config.conv_channels is None: raise ValueError( "Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution" " layers." ) self.mid_channels = config.conv_channels else: self.mid_channels = None self.out_channels = config.hidden_size * 2 # considering GLU halving self.kernel_size = config.conv_kernel self.stride = config.conv_stride # NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for # multiple layers of convolutions, but not sure if this model definition should just restrict it # to one layer. This becomes especially relevant when considering the padding like line 1 of forward(). self.conv_layers = nn.ModuleList( nn.Conv1d( self.in_channels if i == 0 else self.mid_channels[i], self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels, kernel_size=k, stride=self.stride[i], padding="valid", ) for i, k in enumerate(self.kernel_size) ) def forward(self, input_features): # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if # there will be just one conv layer. 
padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3) input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0) hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time for conv in self.conv_layers: hidden_states = conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame return hidden_states class MCTCTEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.LayerNorm = MCTCTLayerNorm() self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward( self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_features) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MCTCTSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_dim self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.key = 
nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def reshape_fortran(self, x, shape): if len(x.shape) > 0: x = x.permute(*reversed(range(len(x.shape)))) return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape)))) def relative_position_embedding_rotate(self, scores): # NOTE: should re-evaluate whether this re-implementation was truly necessary # or the reason why my complete re-haul worked was due to some other part # of the code. Adding this and the reshape fortrain code seems very undesirable. scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4] batch, hidden_state, seq_len, heads = scores.shape # e.g. [10, 1853, 14, 4] scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1) # e.g. [10, 25942, 1, 4] scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads]) # e.g. [10, 25928, 1, 4] scores = scores[:, : (seq_len + hidden_state - 1) * seq_len] # e.g. [10, 1852, 14, 4] scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads]) halfpoint = hidden_state // 2 scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4] return scores.permute(0, 3, 1, 2) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) # relative key position embeddings positional_embedding = self.distance_embedding.weight relative_position_scores = torch.einsum("lh, bche -> bcle", positional_embedding, query_layer.transpose(2, 3)) relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores) attention_scores = attention_scores + relative_position_scores if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in MCTCTModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class MCTCTLayerNorm(nn.Module): def __init__(self): super().__init__() self.singleton_weight = nn.Parameter(torch.ones(1)) self.singleton_bias = nn.Parameter(torch.zeros(1)) def forward(self, hidden_states): return (hidden_states * self.singleton_weight) + self.singleton_bias class MCTCTSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MCTCTAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MCTCTSelfAttention(config) self.output = MCTCTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class MCTCTIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class MCTCTOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MCTCTLayer(nn.Module): def __init__(self, config: MCTCTConfig): super().__init__() self.seq_len_dim = 1 self.chunk_size_feed_forward = 
config.chunk_size_feed_forward self.intermediate = MCTCTIntermediate(config) self.attention = MCTCTAttention(config) self.is_decoder = config.is_decoder self.output = MCTCTOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class MCTCTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MCTCTConfig base_model_prefix = "mctct" main_input_name = "input_features" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MCTCTLayerNorm): module.singleton_weight.data.fill_(1.0) module.singleton_bias.data.zero_() if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ dilation = 1 for _, kernel_sz, stride in zip( range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride ): padding = kernel_sz // 2 input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1 input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1 return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask): # generate creates 3D attention mask, because of the shape of input_features # convert it to 2D if thats the case if len(attention_mask.shape) > 2: attention_mask = attention_mask[:, :, -1] # subsampled_lengths = attention_mask.sum(-1) subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)) bsz = attention_mask.size()[0] attention_mask = torch.zeros( (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long() return attention_mask MCTCT_START_DOCSTRING = r""" This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MCTCT_INPUTS_DOCSTRING = r""" Args: input_features (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
""" class MCTCTEncoder(MCTCTPreTrainedModel): def __init__(self, config: MCTCTConfig): super().__init__(config) self.hidden_dropout_prob = config.hidden_dropout_prob self.layer_norm = MCTCTLayerNorm() self.conv = MCTCTConv1dSubsampler(config) self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, input_features: torch.Tensor, attention_mask: torch.Tensor, head_mask: torch.Tensor, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_features = self.layer_norm(input_features) inputs_embeds = self.conv(input_features) # subsample attention mask if necessary if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask) hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, " f"but it is for {head_mask.size()[0]}." 
) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @add_start_docstrings( "The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.", MCTCT_START_DOCSTRING, ) class MCTCTModel(MCTCTPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = MCTCTEncoder(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_features is None: raise ValueError("You have to specify input_features.") encoder_outputs = self.encoder( input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", MCTCT_START_DOCSTRING, ) class MCTCTForCTC(MCTCTPreTrainedModel): def __init__(self, config): super().__init__(config) self.mctct = MCTCTModel(config) if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary 
size of the language model head. Please " "instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = config.hidden_size self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mctct( input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.ctc_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones(input_features.shape[:-1], dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions )
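`MCTCTPreTrainedModel._get_feat_extract_output_lengths` above is just the standard `nn.Conv1d` length formula applied once per configured convolution, with `padding = kernel // 2` and dilation fixed to 1. The following standalone sketch mirrors that arithmetic; the kernel size and stride are illustrative values, not a claim about any released checkpoint.

```python
import torch


def conv_output_lengths(input_lengths: torch.LongTensor, conv_kernel, conv_stride) -> torch.LongTensor:
    # Mirrors _get_feat_extract_output_lengths: padding = kernel // 2, dilation fixed to 1.
    dilation = 1
    for kernel_sz, stride in zip(conv_kernel, conv_stride):
        padding = kernel_sz // 2
        input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1
        input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1
    return input_lengths


# Feature sequences of 100 and 160 frames through one conv with kernel 7 and stride 3 (assumed values).
print(conv_output_lengths(torch.tensor([100, 160]), conv_kernel=(7,), conv_stride=(3,)))  # tensor([34, 54])
```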
transformers/src/transformers/models/deprecated/mctct/modeling_mctct.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/mctct/modeling_mctct.py", "repo_id": "transformers", "token_count": 14036 }
322
# coding=utf-8 # Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TrajectoryTransformer model configuration""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class TrajectoryTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to instantiate an TrajectoryTransformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TrajectoryTransformer [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 100): Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`] action_weight (`int`, *optional*, defaults to 5): Weight of the action in the loss function reward_weight (`int`, *optional*, defaults to 1): Weight of the reward in the loss function value_weight (`int`, *optional*, defaults to 1): Weight of the value in the loss function block_size (`int`, *optional*, defaults to 249): Size of the blocks in the trajectory transformer. action_dim (`int`, *optional*, defaults to 6): Dimension of the action space. observation_dim (`int`, *optional*, defaults to 17): Dimension of the observation space. transition_dim (`int`, *optional*, defaults to 25): Dimension of the transition space. n_layer (`int`, *optional*, defaults to 4): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. n_embd (`int`, *optional*, defaults to 128): Dimensionality of the embeddings and hidden states. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`int`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. 
If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. kaiming_initializer_range (`float, *optional*, defaults to 1): A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. Example: ```python >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration >>> configuration = TrajectoryTransformerConfig() >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration >>> model = TrajectoryTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "trajectory_transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs, ): self.vocab_size = vocab_size self.action_weight = action_weight self.reward_weight = reward_weight self.value_weight = value_weight self.max_position_embeddings = max_position_embeddings self.block_size = block_size self.action_dim = action_dim self.observation_dim = observation_dim self.transition_dim = transition_dim self.learning_rate = learning_rate self.n_layer = n_layer self.n_head = n_head self.n_embd = n_embd self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.resid_pdrop = resid_pdrop self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.kaiming_initializer_range = kaiming_initializer_range self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
transformers/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py", "repo_id": "transformers", "token_count": 2817 }
323
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DepthAnything model configuration""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP = { "LiheYoung/depth-anything-small-hf": "https://huggingface.co/LiheYoung/depth-anything-small-hf/resolve/main/config.json", } class DepthAnythingConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DepthAnythingModel`]. It is used to instantiate an DepthAnything model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DepthAnything [LiheYoung/depth-anything-small-hf](https://huggingface.co/LiheYoung/depth-anything-small-hf) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*): The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to leverage the [`AutoBackbone`] API. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. patch_size (`int`, *optional*, defaults to 14): The size of the patches to extract from the backbone features. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. reassemble_hidden_size (`int`, *optional*, defaults to 384): The number of input channels of the reassemble layers. reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`): The up/downsampling factors of the reassemble layers. neck_hidden_sizes (`List[str]`, *optional*, defaults to `[48, 96, 192, 384]`): The hidden sizes to project to for the feature maps of the backbone. fusion_hidden_size (`int`, *optional*, defaults to 64): The number of channels before fusion. head_in_index (`int`, *optional*, defaults to -1): The index of the features to use in the depth estimation head. head_hidden_size (`int`, *optional*, defaults to 32): The number of output channels in the second convolution of the depth estimation head. 
Example: ```python >>> from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation >>> # Initializing a DepthAnything small style configuration >>> configuration = DepthAnythingConfig() >>> # Initializing a model from the DepthAnything small style configuration >>> model = DepthAnythingForDepthEstimation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "depth_anything" def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, patch_size=14, initializer_range=0.02, reassemble_hidden_size=384, reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[48, 96, 192, 384], fusion_hidden_size=64, head_in_index=-1, head_hidden_size=32, **kwargs, ): super().__init__(**kwargs) if use_pretrained_backbone: raise ValueError("Pretrained backbones are not supported yet.") if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.") backbone_config = CONFIG_MAPPING["dinov2"]( image_size=518, hidden_size=384, num_attention_heads=6, out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False, ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.reassemble_hidden_size = reassemble_hidden_size self.patch_size = patch_size self.initializer_range = initializer_range self.reassemble_factors = reassemble_factors self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.head_in_index = head_in_index self.head_hidden_size = head_hidden_size def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: output["backbone_config"] = self.backbone_config.to_dict() output["model_type"] = self.__class__.model_type return output
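As the `__init__` above shows, omitting both `backbone` and `backbone_config` falls back to a default Dinov2 backbone configuration, and the overridden `to_dict()` serializes that backbone as a nested dict which `__init__` accepts back. Below is a small sketch of that round trip, inferred from the code above rather than taken from official documentation.

```python
from transformers import DepthAnythingConfig

# No backbone arguments: the default Dinov2 backbone config is created.
config = DepthAnythingConfig()
print(type(config.backbone_config).__name__)  # Dinov2Config
print(config.backbone_config.hidden_size)     # 384

# to_dict() nests the backbone config as a plain dict...
as_dict = config.to_dict()
print(as_dict["backbone_config"]["model_type"])  # dinov2

# ...which the constructor converts back into a config object via CONFIG_MAPPING.
rebuilt = DepthAnythingConfig(backbone_config=as_dict["backbone_config"])
assert rebuilt.backbone_config.hidden_size == config.backbone_config.hidden_size
```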
transformers/src/transformers/models/depth_anything/configuration_depth_anything.py/0
{ "file_path": "transformers/src/transformers/models/depth_anything/configuration_depth_anything.py", "repo_id": "transformers", "token_count": 2443 }
324
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for EnCodec."""

from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs an EnCodec feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
    [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.

    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
        sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used to fill the padding values.
        chunk_length_s (`float`, *optional*):
            If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
        overlap (`float`, *optional*):
            Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
            formula: `int((1.0 - self.overlap) * self.chunk_length)`.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).
Args: raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. 
Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels") padded_inputs = None input_values = BatchFeature({"input_values": raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: max_length = min(array.shape[0] for array in raw_audio) nb_step = int(np.floor(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: max_length = max(array.shape[0] for array in raw_audio) nb_step = int(np.ceil(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length padding = "max_length" else: padded_inputs = input_values # normal padding on batch if padded_inputs is None: padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, ) if padding: padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask") input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
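A minimal usage sketch for the feature extractor defined above. The waveform is synthetic and the extractor is built with its documented defaults (mono, 24 kHz); loading `facebook/encodec_24khz` via `from_pretrained` would behave the same if that checkpoint is available. The printed shapes are expected values reasoned from the code above, not from a recorded run.

```python
import numpy as np

from transformers import EncodecFeatureExtractor

# Documented defaults: mono audio (feature_size=1) sampled at 24 kHz.
feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)

# One second of synthetic mono audio at the expected sampling rate.
raw_audio = np.random.randn(24000).astype(np.float32)

# Passing sampling_rate explicitly avoids the "silent errors" warning emitted above.
inputs = feature_extractor(raw_audio, sampling_rate=24000, return_tensors="pt")

print(inputs["input_values"].shape)  # expected: torch.Size([1, 1, 24000]) -> (batch, channels, samples)
print(inputs["padding_mask"].shape)  # expected: torch.Size([1, 24000])
```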
transformers/src/transformers/models/encodec/feature_extraction_encodec.py/0
{ "file_path": "transformers/src/transformers/models/encodec/feature_extraction_encodec.py", "repo_id": "transformers", "token_count": 4073 }
325
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ESM checkpoint.""" import argparse import pathlib from pathlib import Path from tempfile import TemporaryDirectory import esm as esm_module import torch from esm.esmfold.v1.misc import batch_encode_sequences as esmfold_encode_sequences from esm.esmfold.v1.pretrained import esmfold_v1 from transformers.models.esm.configuration_esm import EsmConfig, EsmFoldConfig from transformers.models.esm.modeling_esm import ( EsmForMaskedLM, EsmForSequenceClassification, EsmIntermediate, EsmLayer, EsmOutput, EsmSelfAttention, EsmSelfOutput, ) from transformers.models.esm.modeling_esmfold import EsmForProteinFolding from transformers.models.esm.tokenization_esm import EsmTokenizer from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_DATA = [ ( "protein1", "MNGTEGPNFYVPFSNATGVVRSPFEYPQYYLAEPWQFSMLAAYMFLLIVLGFPINFLTLYVTVQHKKLRTPLNYILLNLAVADLFMVLGGFTSTLYTSLHGYFVFGPTGCNLEGFFATLGGEIALWSLVVLAIERYVVVCKPMSNFRFGENHAIMGVAFTWVMALACAAPPLAGWSRYIPEGLQCSCGIDYYTLKPEVNNESFVIYMFVVHFTIPMIIIFFCYGQLVFTVKEAAAQQQESATTQKAEKEVTRMVIIMVIAFLICWVPYASVAFYIFTHQGSNFGPIFMTIPAFFAKSAAIYNPVIYIMMNKQFRNCMLTTICCGKNPLGDDEASATVSKTETSQVAPA", ), ("protein2", "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLA"), ("protein3", "MKTVRQERLKSI<mask>RILERSKEPVSGAQLAEELS<mask>SRQVIVQDIAYLRSLGYN<mask>VATPRGYVLAGG"), ("protein4", "MKTVRQERLKSI<mask>RILERSKEPVSGAQLAEELS<mask>SRQVIVQDIAYLRSLGYN<mask>VATPRGYVLA"), ] MODEL_MAPPING = { "esm1b_t33_650M_UR50S": esm_module.pretrained.esm1b_t33_650M_UR50S, "esm1v_t33_650M_UR90S_1": esm_module.pretrained.esm1v_t33_650M_UR90S_1, "esm1v_t33_650M_UR90S_2": esm_module.pretrained.esm1v_t33_650M_UR90S_2, "esm1v_t33_650M_UR90S_3": esm_module.pretrained.esm1v_t33_650M_UR90S_3, "esm1v_t33_650M_UR90S_4": esm_module.pretrained.esm1v_t33_650M_UR90S_4, "esm1v_t33_650M_UR90S_5": esm_module.pretrained.esm1v_t33_650M_UR90S_5, "esm2_t48_15B_UR50D": esm_module.pretrained.esm2_t48_15B_UR50D, "esm2_t36_3B_UR50D": esm_module.pretrained.esm2_t36_3B_UR50D, "esm2_t33_650M_UR50D": esm_module.pretrained.esm2_t33_650M_UR50D, "esm2_t30_150M_UR50D": esm_module.pretrained.esm2_t30_150M_UR50D, "esm2_t12_35M_UR50D": esm_module.pretrained.esm2_t12_35M_UR50D, "esm2_t6_8M_UR50D": esm_module.pretrained.esm2_t6_8M_UR50D, "esmfold_v1": esmfold_v1, } restypes = list("ARNDCQEGHILKMFPSTWYV") restypes_with_x = restypes + ["X"] restypes_with_extras = restypes_with_x + ["<pad>", "<mask>", "<cls>", "<sep>", "<eos>"] def get_esmfold_tokenizer(): with TemporaryDirectory() as tempdir: vocab = "\n".join(restypes_with_extras) vocab_file = Path(tempdir) / "vocab.txt" vocab_file.write_text(vocab) hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file)) hf_tokenizer.pad_token_id = 0 # Overlaps with 'A' but that seems to be what they want return hf_tokenizer def transfer_and_check_weights(original_module, our_module): status = our_module.load_state_dict(original_module.state_dict()) if 
status.missing_keys: raise ValueError(f"Missing keys: {status.missing_keys}") if status.unexpected_keys: raise ValueError(f"Unexpected keys: {status.unexpected_keys}") def convert_esm_checkpoint_to_pytorch( model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str ): """ Copy/paste/tweak esm's weights to our BERT structure. """ if model.startswith("esmfold"): esm = MODEL_MAPPING[model]() else: esm, alphabet = MODEL_MAPPING[model]() esm.eval() # disable dropout if model.startswith("esmfold"): embed_dim = esm.esm.embed_dim num_layers = esm.esm.num_layers num_attention_heads = esm.esm.attention_heads intermediate_size = 4 * embed_dim token_dropout = esm.esm.token_dropout emb_layer_norm_before = False # This code path does not exist in ESM-2 position_embedding_type = "rotary" is_folding_model = True esmfold_config = EsmFoldConfig() for key, val in esm.cfg.items(): if hasattr(esmfold_config, key) and key != "trunk": setattr(esmfold_config, key, val) for key, val in esm.cfg.trunk.items(): if hasattr(esmfold_config.trunk, key) and key != "structure_module": setattr(esmfold_config.trunk, key, val) for key, val in esm.cfg.trunk.structure_module.items(): if hasattr(esmfold_config.trunk.structure_module, key): setattr(esmfold_config.trunk.structure_module, key, val) elif hasattr(esm, "args"): # Indicates an ESM-1b or ESM-1v model embed_dim = esm.args.embed_dim num_layers = esm.args.layers num_attention_heads = esm.args.attention_heads intermediate_size = esm.args.ffn_embed_dim token_dropout = esm.args.token_dropout emb_layer_norm_before = True if esm.emb_layer_norm_before else False position_embedding_type = "absolute" is_folding_model = False esmfold_config = None else: # Indicates an ESM-2 model embed_dim = esm.embed_dim num_layers = esm.num_layers num_attention_heads = esm.attention_heads intermediate_size = 4 * embed_dim # This is hardcoded in ESM-2 token_dropout = esm.token_dropout emb_layer_norm_before = False # This code path does not exist in ESM-2 position_embedding_type = "rotary" is_folding_model = False esmfold_config = None if is_folding_model: alphabet = esm.esm.alphabet vocab_list = tuple(alphabet.all_toks) mask_token_id = alphabet.mask_idx pad_token_id = alphabet.padding_idx if is_folding_model: original_esm_model = esm.esm else: original_esm_model = esm config = EsmConfig( vocab_size=original_esm_model.embed_tokens.num_embeddings, mask_token_id=mask_token_id, hidden_size=embed_dim, num_hidden_layers=num_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, max_position_embeddings=1026, layer_norm_eps=1e-5, # PyTorch default used in fairseq attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0, pad_token_id=pad_token_id, emb_layer_norm_before=emb_layer_norm_before, token_dropout=token_dropout, position_embedding_type=position_embedding_type, is_folding_model=is_folding_model, esmfold_config=esmfold_config, vocab_list=vocab_list, ) if classification_head: config.num_labels = esm.classification_heads["mnli"].out_proj.weight.shape[0] print("Our ESM config:", config) if model.startswith("esmfold"): model_class = EsmForProteinFolding elif classification_head: model_class = EsmForSequenceClassification else: model_class = EsmForMaskedLM model = model_class(config) model.eval() # Now let's copy all the weights. 
# Embeddings model.esm.embeddings.word_embeddings.weight = original_esm_model.embed_tokens.weight if position_embedding_type == "absolute": model.esm.embeddings.position_embeddings.weight = original_esm_model.embed_positions.weight if config.emb_layer_norm_before: model.esm.embeddings.layer_norm.weight = original_esm_model.emb_layer_norm_before.weight model.esm.embeddings.layer_norm.bias = original_esm_model.emb_layer_norm_before.bias model.esm.encoder.emb_layer_norm_after.weight = original_esm_model.emb_layer_norm_after.weight model.esm.encoder.emb_layer_norm_after.bias = original_esm_model.emb_layer_norm_after.bias for i in range(config.num_hidden_layers): # Encoder: start of layer layer: EsmLayer = model.esm.encoder.layer[i] # esm_layer: TransformerSentenceEncoderLayer = original_esm_model.layers[i] esm_layer = original_esm_model.layers[i] # self attention self_attn: EsmSelfAttention = layer.attention.self assert ( esm_layer.self_attn.k_proj.weight.data.shape == esm_layer.self_attn.q_proj.weight.data.shape == esm_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ) self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias if getattr(esm_layer.self_attn, "rot_emb", None) is not None: # Matt: Although inv_freq is not a trainable weight, it is computed at model init and cached. # During the training of ESM-2 the model was converted to float16 precision, which also converts # the inv_freq tensor, and the loss of precision remains even if the model is loaded later as float32. # If we recompute inv_freq without this loss of precision then we will get subtly different rotary # embeddings, which are enough to cause significant discrepancies in model outputs. To avoid this, # we make sure the new model copies the data from the old inv_freq. 
self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq # LayerNorm changes for pre-activation layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias layer.LayerNorm.weight = esm_layer.final_layer_norm.weight layer.LayerNorm.bias = esm_layer.final_layer_norm.bias # self-attention output self_output: EsmSelfOutput = layer.attention.output assert self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape self_output.dense.weight = esm_layer.self_attn.out_proj.weight self_output.dense.bias = esm_layer.self_attn.out_proj.bias # intermediate intermediate: EsmIntermediate = layer.intermediate assert intermediate.dense.weight.shape == esm_layer.fc1.weight.shape intermediate.dense.weight = esm_layer.fc1.weight intermediate.dense.bias = esm_layer.fc1.bias # output bert_output: EsmOutput = layer.output assert bert_output.dense.weight.shape == esm_layer.fc2.weight.shape bert_output.dense.weight = esm_layer.fc2.weight bert_output.dense.bias = esm_layer.fc2.bias # end of layer if is_folding_model: model.esm_s_combine.data = esm.esm_s_combine.data model.af2_to_esm.data = esm.af2_to_esm.data transfer_and_check_weights(esm.embedding, model.embedding) transfer_and_check_weights(esm.esm_s_mlp, model.esm_s_mlp) transfer_and_check_weights(esm.trunk, model.trunk) transfer_and_check_weights(esm.distogram_head, model.distogram_head) transfer_and_check_weights(esm.ptm_head, model.ptm_head) transfer_and_check_weights(esm.lm_head, model.lm_head) transfer_and_check_weights(esm.lddt_head, model.lddt_head) elif classification_head: model.classifier.dense.weight = esm.esm.classification_heads["mnli"].dense.weight model.classifier.dense.bias = esm.classification_heads["mnli"].dense.bias model.classifier.out_proj.weight = esm.classification_heads["mnli"].out_proj.weight model.classifier.out_proj.bias = esm.classification_heads["mnli"].out_proj.bias else: # LM Head model.lm_head.dense.weight = esm.lm_head.dense.weight model.lm_head.dense.bias = esm.lm_head.dense.bias model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias model.lm_head.decoder.weight = esm.lm_head.weight model.lm_head.bias = esm.lm_head.bias # Contact prediction head transfer_and_check_weights(esm.contact_head, model.esm.contact_head) # Prepare data (first 2 sequences from ESMStructuralSplitDataset superfamily / 4) if is_folding_model: # Folding models aren't trained on masked inputs and don't like mask tokens. sample_data = SAMPLE_DATA[:2] else: sample_data = SAMPLE_DATA if is_folding_model: hf_tokenizer = get_esmfold_tokenizer() hf_tokens = hf_tokenizer( [row[1] for row in sample_data], return_tensors="pt", padding=True, add_special_tokens=False ) esmfold_aas, esmfold_mask, _, _, _ = esmfold_encode_sequences([row[1] for row in sample_data]) success = torch.all(hf_tokens["input_ids"] == esmfold_aas) and torch.all( hf_tokens["attention_mask"] == esmfold_mask ) else: # Let's check that we get the same results. 
batch_converter = alphabet.get_batch_converter() batch_labels, batch_strs, batch_tokens = batch_converter(sample_data) # Prepare tokenizer and make sure it matches with TemporaryDirectory() as tempdir: vocab = "\n".join(alphabet.all_toks) vocab_file = Path(tempdir) / "vocab.txt" vocab_file.write_text(vocab) hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file)) hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors="pt", padding=True) success = torch.all(hf_tokens["input_ids"] == batch_tokens) print("Do both models tokenizers output the same tokens?", "🔥" if success else "💩") if not success: raise Exception("Tokenization does not match!") with torch.no_grad(): if is_folding_model: # Let's test the model in parts # ESMFold always converts the ESM stem to float16, which requires float16 ops # that don't exist on CPU. Therefore, to test it we need to run it on GPU. However, # ESMFold is what we in the community call a "big boy" and so we desperately avoid putting both the # original and the converted model on the GPU at the same time. their_output = esm.cuda().infer([row[1] for row in sample_data]) our_output = model.cuda()( input_ids=hf_tokens["input_ids"].cuda(), attention_mask=hf_tokens["attention_mask"].cuda() ) else: our_output = model(**hf_tokens, output_hidden_states=True) our_output = our_output["logits"] if classification_head: their_output = esm.model.classification_heads["mnli"](esm.extract_features(batch_tokens)) else: their_output = esm(hf_tokens["input_ids"], repr_layers=list(range(999))) their_output = their_output["logits"] if is_folding_model: max_absolute_diff = torch.max(torch.abs(our_output["positions"] - their_output["positions"])).item() success = torch.allclose(our_output["positions"], their_output["positions"], atol=1e-5) else: max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() success = torch.allclose(our_output, their_output, atol=1e-5) print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5 print("Do both models output the same tensors?", "🔥" if success else "💩") if not success: raise Exception("Something went wRoNg") if not is_folding_model: # Let's check contact prediction too our_output = model.predict_contacts(hf_tokens["input_ids"], hf_tokens["attention_mask"]) their_output = esm.predict_contacts(hf_tokens["input_ids"]) max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() success = torch.allclose(our_output, their_output, atol=1e-5) print("Contact prediction testing:") print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5 print("Do both models output the same tensors?", "🔥" if success else "💩") if not success: raise Exception("Something went wRoNg") pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) del esm # Free up some memory before continuing print(f"Saving tokenizer to {pytorch_dump_folder_path}") hf_tokenizer.save_pretrained(pytorch_dump_folder_path) if push_to_repo: model.push_to_hub(repo_id=push_to_repo, token_token=auth_token) hf_tokenizer.push_to_hub(repo_id=push_to_repo, token_token=auth_token) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." 
) parser.add_argument("--model", default=None, type=str, required=True, help="Name of model to convert.") parser.add_argument("--push_to_repo", type=str, help="Repo to upload to (including username!).") parser.add_argument("--auth_token", type=str, help="HuggingFace auth token.") args = parser.parse_args() convert_esm_checkpoint_to_pytorch( args.model, args.pytorch_dump_folder_path, args.classification_head, args.push_to_repo, args.auth_token )
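A hedged sketch of driving the converter above from Python instead of its argparse CLI. The model name is the smallest entry in `MODEL_MAPPING`, the output directory is a placeholder, and running this requires the original `esm` package plus a download of the checkpoint.

```python
# Assumes this script is importable as `convert_esm`; otherwise run it through its CLI.
from convert_esm import convert_esm_checkpoint_to_pytorch

convert_esm_checkpoint_to_pytorch(
    model="esm2_t6_8M_UR50D",                           # smallest checkpoint in MODEL_MAPPING
    pytorch_dump_folder_path="./esm2_t6_8M_UR50D-hf",   # placeholder output directory
    classification_head=False,
    push_to_repo=None,                                  # skip the Hub upload
    auth_token=None,
)
```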
transformers/src/transformers/models/esm/convert_esm.py/0
{ "file_path": "transformers/src/transformers/models/esm/convert_esm.py", "repo_id": "transformers", "token_count": 8133 }
326
import json from argparse import ArgumentParser from pathlib import Path """ This script converts Falcon custom code checkpoints to modern Falcon checkpoints that use code in the Transformers library. After conversion, performance (especially for generation) should improve and the checkpoint can be loaded without needing trust_remote_code=True. """ if __name__ == "__main__": parser = ArgumentParser() parser.add_argument( "--checkpoint_dir", type=Path, required=True, help="Directory containing a custom code checkpoint to convert to a modern Falcon checkpoint.", ) args = parser.parse_args() if not args.checkpoint_dir.is_dir(): raise ValueError("--checkpoint_dir argument should be a directory!") if ( not (args.checkpoint_dir / "configuration_RW.py").is_file() or not (args.checkpoint_dir / "modelling_RW.py").is_file() ): raise ValueError( "The model directory should contain configuration_RW.py and modelling_RW.py files! Are you sure this is a custom code checkpoint?" ) (args.checkpoint_dir / "configuration_RW.py").unlink() (args.checkpoint_dir / "modelling_RW.py").unlink() config = args.checkpoint_dir / "config.json" text = config.read_text() text = text.replace("RWForCausalLM", "FalconForCausalLM") text = text.replace("RefinedWebModel", "falcon") text = text.replace("RefinedWeb", "falcon") json_config = json.loads(text) del json_config["auto_map"] if "n_head" in json_config: json_config["num_attention_heads"] = json_config.pop("n_head") if "n_layer" in json_config: json_config["num_hidden_layers"] = json_config.pop("n_layer") if "n_head_kv" in json_config: json_config["num_kv_heads"] = json_config.pop("n_head_kv") json_config["new_decoder_architecture"] = True else: json_config["new_decoder_architecture"] = False bos_token_id = json_config.get("bos_token_id", 1) eos_token_id = json_config.get("eos_token_id", 2) config.unlink() config.write_text(json.dumps(json_config, indent=2, sort_keys=True)) tokenizer_config = args.checkpoint_dir / "tokenizer_config.json" if tokenizer_config.is_file(): text = tokenizer_config.read_text() json_config = json.loads(text) if json_config["tokenizer_class"] == "PreTrainedTokenizerFast": json_config["model_input_names"] = ["input_ids", "attention_mask"] tokenizer_config.unlink() tokenizer_config.write_text(json.dumps(json_config, indent=2, sort_keys=True)) generation_config_path = args.checkpoint_dir / "generation_config.json" generation_dict = { "_from_model_config": True, "bos_token_id": bos_token_id, "eos_token_id": eos_token_id, "transformers_version": "4.33.0.dev0", } generation_config_path.write_text(json.dumps(generation_dict, indent=2, sort_keys=True)) print("Done! Please double-check that the new checkpoint works as expected.")
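A short sketch of what the checkpoint directory supports once the script above has rewritten it. The local path is a placeholder and must contain real Falcon weights that have already been converted.

```python
from transformers import AutoTokenizer, FalconForCausalLM

checkpoint_dir = "./my-falcon-checkpoint"  # placeholder: a directory already processed by this script

# No trust_remote_code needed any more: the rewritten config maps to FalconForCausalLM.
model = FalconForCausalLM.from_pretrained(checkpoint_dir)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
```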
transformers/src/transformers/models/falcon/convert_custom_code_checkpoint.py/0
{ "file_path": "transformers/src/transformers/models/falcon/convert_custom_code_checkpoint.py", "repo_id": "transformers", "token_count": 1171 }
327
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def rreplace(s, old, new, occurrence): li = s.rsplit(old, occurrence) return new.join(li) def count_parameters(state_dict): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items()) def upgrade_state_dict(state_dict): upgrade = {} group_keys = ["group_1", "group_2", "group_3", "group_4"] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: key = key.replace(f"{group_key}.", f"{group_key}.group.") if "res_path" in key: key = key.replace("res_path.", "res_path.path.") if key.endswith(".w"): key = rreplace(key, ".w", ".weight", 1) if key.endswith(".b"): key = rreplace(key, ".b", ".bias", 1) upgrade[key] = value.float() return upgrade @torch.no_grad() def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True): """ Copy/paste/tweak model's weights to transformers design. """ from dall_e import Encoder encoder = Encoder() if os.path.exists(checkpoint_path): ckpt = torch.load(checkpoint_path) else: ckpt = torch.hub.load_state_dict_from_url(checkpoint_path) if isinstance(ckpt, Encoder): ckpt = ckpt.state_dict() encoder.load_state_dict(ckpt) if config_path is not None: config = FlavaImageCodebookConfig.from_pretrained(config_path) else: config = FlavaImageCodebookConfig() hf_model = FlavaImageCodebook(config).eval() state_dict = encoder.state_dict() hf_state_dict = upgrade_state_dict(state_dict) hf_model.load_state_dict(hf_state_dict) hf_state_dict = hf_model.state_dict() hf_count = count_parameters(hf_state_dict) state_dict_count = count_parameters(state_dict) assert torch.allclose(hf_count, state_dict_count, atol=1e-3) if save_checkpoint: hf_model.save_pretrained(pytorch_dump_folder_path) else: return hf_state_dict if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") args = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
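A tiny, self-contained illustration of the key renaming performed by `upgrade_state_dict` above (assuming that function is in scope); the keys and tensors are made up, so no `dall_e` dependency is needed.

```python
import torch

fake_state_dict = {
    "blocks.group_1.res_path.block_1.w": torch.ones(1),
    "blocks.group_1.res_path.block_1.b": torch.zeros(1),
}

# group_1 -> group_1.group, res_path -> res_path.path, .w/.b -> .weight/.bias
print(sorted(upgrade_state_dict(fake_state_dict)))
# ['blocks.group_1.group.res_path.path.block_1.bias',
#  'blocks.group_1.group.res_path.path.block_1.weight']
```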
transformers/src/transformers/models/flava/convert_dalle_to_flava_codebook.py/0
{ "file_path": "transformers/src/transformers/models/flava/convert_dalle_to_flava_codebook.py", "repo_id": "transformers", "token_count": 1300 }
328
# coding=utf-8 # Copyright 2023 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Fuyu model.""" from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...modeling_outputs import CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...models.auto.modeling_auto import AutoModelForCausalLM from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_fuyu import FuyuConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "FuyuConfig" FUYU_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FuyuConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare Fuyu Model outputting raw hidden-states without any specific head on top.", FUYU_START_DOCSTRING, ) class FuyuPreTrainedModel(PreTrainedModel): config_class = FuyuConfig base_model_prefix = "fuyu" supports_gradient_checkpointing = True _no_split_modules = [] _skip_keys_device_placement = "past_key_values" def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() FUYU_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size_ x patch_size x num_channels)`, *optional*): Image patches to be used as continuous embeddings. The patches are flattened and then projected to the hidden size of the model. image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens, patch_size_ x patch_size x num_channels )`, *optional*): Indices indicating at which position the image_patches have to be inserted in input_embeds. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.", FUYU_START_DOCSTRING, ) class FuyuForCausalLM(FuyuPreTrainedModel): def __init__(self, config: FuyuConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.language_model = AutoModelForCausalLM.from_config(config.text_config) self.vision_embed_tokens = nn.Linear( config.patch_size * config.patch_size * config.num_channels, config.hidden_size ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def gather_continuous_embeddings( self, word_embeddings: torch.Tensor, continuous_embeddings: List[torch.Tensor], image_patch_input_indices: torch.Tensor, ) -> torch.Tensor: """This function places the continuous_embeddings into the word_embeddings at the locations indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous embeddings. Args: word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Tensor of word embeddings. continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative indices in image_patch_input_indices for that batch element. image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Tensor of indices of the image patches in the input_ids tensor. """ if not (word_embeddings.shape[0] == len(continuous_embeddings)): raise ValueError( f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}" ) output_embeddings = word_embeddings.clone() for batch_idx in range(word_embeddings.shape[0]): # First, find the positions of all the non-negative values in image_patch_input_indices, those are the # positions in word_embeddings that we want to replace with content from continuous_embeddings. dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0] # Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we # want to use to replace the values in word_embeddings. src_indices = image_patch_input_indices[batch_idx][dst_indices] # Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated. if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]: raise ValueError( f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match " f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}." 
            )

            output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
        return output_embeddings

    @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        image_patches: torch.Tensor = None,  # [batch_size, num_total_patches, patch_size_ x patch_size x num_channels ]
        image_patches_indices: torch.Tensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import FuyuProcessor, FuyuForCausalLM
        >>> from PIL import Image
        >>> import requests

        >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
        >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> prompt = "Generate a coco-style caption.\n"

        >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
        >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
        >>> print(generation_text[0])
        A bus parked on the side of a road.
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_is or inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if image_patches is not None and past_key_values is None: patch_embeddings = [ self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)).squeeze(0) for patch in image_patches ] inputs_embeds = self.gather_continuous_embeddings( word_embeddings=inputs_embeds, continuous_embeddings=patch_embeddings, image_patch_input_indices=image_patches_indices, ) outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, use_cache=use_cache, return_dict=return_dict, ) return outputs def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, image_patches=None, image_patches_indices=None, **kwargs, ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} if image_patches_indices is not None: model_inputs["image_patches_indices"] = image_patches_indices model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "image_patches_indices": image_patches_indices if past_key_values is None else None, "image_patches": image_patches if past_key_values is None else None, } ) return model_inputs
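To make `gather_continuous_embeddings` above concrete, here is its core indexing step reproduced on toy tensors (standalone, no model instantiation): positions whose `image_patch_input_indices` entry is non-negative are overwritten by the corresponding row of the per-sample patch embeddings.

```python
import torch

hidden = 4
word_embeddings = torch.zeros(1, 5, hidden)                                     # (batch, seq, hidden)
patch_embeddings = torch.arange(2 * hidden, dtype=torch.float32).reshape(2, hidden)
indices = torch.tensor([[-1, 0, -1, 1, -1]])                                    # tokens 1 and 3 are patch placeholders

dst_indices = torch.nonzero(indices[0] >= 0, as_tuple=True)[0]                  # tensor([1, 3])
src_indices = indices[0][dst_indices]                                           # tensor([0, 1])

merged = word_embeddings.clone()
merged[0, dst_indices] = patch_embeddings[src_indices]

print(merged[0, 1])  # tensor([0., 1., 2., 3.])
print(merged[0, 3])  # tensor([4., 5., 6., 7.])
```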
transformers/src/transformers/models/fuyu/modeling_fuyu.py/0
{ "file_path": "transformers/src/transformers/models/fuyu/modeling_fuyu.py", "repo_id": "transformers", "token_count": 7181 }
329
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import torch from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): # Construct model if gpt2_config_file == "": config = GPT2Config() else: config = GPT2Config.from_json_file(gpt2_config_file) model = GPT2Model(config) # Load weights from numpy load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--gpt2_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) args = parser.parse_args() convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
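A hedged example of calling the converter above directly (assuming the function is in scope); both paths are placeholders, the output folder must already exist, and a real TensorFlow GPT-2 checkpoint has to live at the given prefix.

```python
import os

os.makedirs("./gpt2_pytorch", exist_ok=True)

convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="./gpt2_tf/model.ckpt",  # placeholder TF checkpoint prefix
    gpt2_config_file="",                          # empty string -> default GPT2Config()
    pytorch_dump_folder_path="./gpt2_pytorch",
)
```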
transformers/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 937 }
330
# coding=utf-8 # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GPTNeoX model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class GPTNeoXConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTNeoX [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50432): Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTNeoXModel`]. hidden_size (`int`, *optional*, defaults to 6144): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 44): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. rotary_pct (`float`, *optional*, defaults to 0.25): percentage of hidden dimensions to allocate to rotary embeddings rotary_emb_base (`int`, *optional*, defaults to 10000) base for computing rotary embeddings frequency attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio probability of the attention score. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio of (1) the word embeddings, (2) the post-attention hidden states, and (3) the post-mlp hidden states. classifier_dropout (`float`, *optional*, defaults to 0.1): Argument used when doing token classification, used in the model [`GPTNeoXForTokenClassification`]. The dropout ratio for the hidden layer. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
initializer_range (`float`, *optional*, defaults to 1e-5): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. use_parallel_residual (`bool`, *optional*, defaults to `True`): Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training speedup at large scales (e.g. 20B). rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, *optional*, defaults to `True`): Whether to use a bias in the query, key, value and output projection layers during self-attention. Example: ```python >>> from transformers import GPTNeoXConfig, GPTNeoXModel >>> # Initializing a GPTNeoX gpt-neox-20b style configuration >>> configuration = GPTNeoXConfig() >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration >>> model = GPTNeoXModel(configuration) # doctest: +SKIP >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ```""" model_type = "gpt_neox" def __init__( self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, attention_bias=True, **kwargs, ): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.rotary_pct = rotary_pct self.rotary_emb_base = rotary_emb_base self.attention_dropout = attention_dropout self.hidden_dropout = hidden_dropout self.classifier_dropout = classifier_dropout self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings self.use_parallel_residual = use_parallel_residual self.rope_scaling = rope_scaling self.attention_bias = attention_bias self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them!" 
            )

    # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
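A small sketch of the `rope_scaling` validation above: the dictionary must carry exactly a `type` in `{"linear", "dynamic"}` and a float `factor` greater than 1.

```python
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1})  # int instead of float -> rejected
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float > 1, got 1
```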
transformers/src/transformers/models/gpt_neox/configuration_gpt_neox.py/0
{ "file_path": "transformers/src/transformers/models/gpt_neox/configuration_gpt_neox.py", "repo_id": "transformers", "token_count": 3539 }
331
# coding=utf-8 # Copyright 2023, HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GPTSAN-japanese model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "tanreinama/GPTSAN-2.8B-spout_is_uniform": ( "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json" ), } class GPTSanJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 36000): Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTSanJapaneseModel`]. max_position_embeddings (`int`, *optional*, defaults to 1280): The maximum sequence length that this model might ever be used with. Defaults set this to 1280. d_model (`int`, *optional*, defaults to 1024): Size of the encoder layers and the pooler layer. d_ff (`int`, *optional*, defaults to 8192): Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. d_ext (`int`, *optional*, defaults to 4096): Size of the intermediate feed forward layer in each Extra-layers. d_spout (`int`, *optional*, defaults to 128): Size of the `spout` vector. num_switch_layers (`int`, *optional*, defaults to 10): Number of layers in the Switch Transformer layer. num_ext_layers (`int`, *optional*, defaults to 0): Number of layers in the Extra-layers. num_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_experts (`int`, *optional*, defaults to 16): Number of experts for each SwitchTransformer layer. expert_capacity (`int`, *optional*, defaults to 128): Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular Transformer. dropout_rate (`float`, *optional*, defaults to 0.0): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. router_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the router. router_jitter_noise (`float`, *optional*, defaults to 0.0): Amount of noise to add to the router. Set it to 0.0 during prediction or set small value (usually 1e-2) during training. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. 
It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961). router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. output_hidden_states (`bool`, *optional*, default to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. initializer_factor (`float`, *optional*, defaults to 0.002): A factor for initializing all weight matrices. output_router_logits (`bool`, *optional*, default to `False`): Whether or not to return the router logits of all experts. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) """ model_type = "gptsan-japanese" keys_to_ignore_at_inference = [ "past_key_values", ] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.d_ff = d_ff self.d_ext = d_ext self.d_spout = d_spout self.num_switch_layers = num_switch_layers self.num_ext_layers = num_ext_layers self.num_layers = num_switch_layers + num_ext_layers self.num_heads = num_heads self.num_experts = num_experts self.expert_capacity = expert_capacity self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.router_bias = router_bias self.router_jitter_noise = router_jitter_noise self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.output_hidden_states = output_hidden_states self.output_attentions = output_attentions self.initializer_factor = initializer_factor self.output_router_logits = output_router_logits self.use_cache = use_cache super().__init__( separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, )
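A minimal instantiation sketch for the configuration above; the values shown are just the documented defaults, spelled out for illustration.

```python
from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig(d_model=1024, num_switch_layers=10, num_ext_layers=0)

print(config.num_layers)   # 10 -> num_switch_layers + num_ext_layers
print(config.hidden_size)  # 1024 -> attribute_map aliases hidden_size to d_model
```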
transformers/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py/0
{ "file_path": "transformers/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py", "repo_id": "transformers", "token_count": 2956 }
332
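The configuration file above derives `num_layers` as `num_switch_layers + num_ext_layers` and maps the generic `hidden_size`/`num_attention_heads` names onto `d_model`/`num_heads` through `attribute_map`. A minimal sketch of how those pieces surface at runtime (assuming the installed `transformers` release ships `GPTSanJapaneseConfig`):

```python
# Minimal sketch: inspect derived and aliased fields of GPTSanJapaneseConfig.
# Assumes a transformers version that exposes GPTSanJapaneseConfig at the top level.
from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)

print(config.num_layers)           # 12 -- num_switch_layers + num_ext_layers
print(config.hidden_size)          # 1024 -- aliased to d_model via attribute_map
print(config.num_attention_heads)  # 16 -- aliased to num_heads
print(config.router_dtype)         # "float32", per the selective-precision note in the docstring
```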
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514} PRETRAINED_INIT_CONFIGURATION = {} class HerbertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library). Peculiarities: - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a punctuation character will be treated separately. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = HerbertTokenizer def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs, ): super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, ) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An HerBERT, like BERT sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" cls = [self.cls_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. HerBERT, like BERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
transformers/src/transformers/models/herbert/tokenization_herbert_fast.py/0
{ "file_path": "transformers/src/transformers/models/herbert/tokenization_herbert_fast.py", "repo_id": "transformers", "token_count": 2812 }
333
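The three helpers in the tokenizer above (`build_inputs_with_special_tokens`, `get_special_tokens_mask`, `create_token_type_ids_from_sequences`) all follow the same `<s> A </s> B </s>` layout. A short sketch of their outputs on toy ID lists (assumes network access to fetch the `allegro/herbert-base-cased` files):

```python
# Sketch: how the special-token helpers compose single and paired sequences.
from transformers import HerbertTokenizerFast

tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")

single = tokenizer.build_inputs_with_special_tokens([10, 11, 12])
pair = tokenizer.build_inputs_with_special_tokens([10, 11, 12], [20, 21])
# single == [cls_id, 10, 11, 12, sep_id]
# pair   == [cls_id, 10, 11, 12, sep_id, 20, 21, sep_id]

mask = tokenizer.get_special_tokens_mask([10, 11, 12], [20, 21])
# 1 marks the added <s>/</s> positions, 0 marks payload tokens:
# [1, 0, 0, 0, 1, 0, 0, 1]

type_ids = tokenizer.create_token_type_ids_from_sequences([10, 11, 12], [20, 21])
# first segment (cls + ids + sep) is 0, second segment (ids + sep) is 1:
# [0, 0, 0, 0, 0, 1, 1, 1]
```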
# This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License. # # MIT License # # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note that the Perceiver in general resamples based solely off the *long-range* context; there's a nice opportunity here to prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore. References: - DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model - Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch """ from typing import Optional, Tuple import torch import torch.nn as nn from .configuration_idefics import IdeficsConfig class IdeficsPerceiverResampler(nn.Module): def __init__( self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int ) -> None: """ Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then returns a Tensor of shape [bsz, n_latents, embed_dim]. :param embed_dim: Dimensionality of embeddings being fed to the Perceiver Resampler (also dimensionality of latent embeddings *returned* by the Perceiver Resampler. Could be e.g., VIT embed_dim, ResNet pool dim, and so on. Args: config (`IdeficsConfig`): config object embed_dim (`int`): The size of each embedding vector depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3). n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention). head_dim (`int`): Dimensionality of each head projection in the Transformer block. n_latents (`int`): Number of latent embeddings to resample ("compress") the input sequence to (usually < 128). 
""" super().__init__() self.embed_dim, self.n_heads, self.head_dim, self.n_latents = embed_dim, n_heads, head_dim, n_latents self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver # Create Latents for Perceiver self.latents = nn.Parameter(torch.randn(self.n_latents, self.embed_dim), requires_grad=True) self.intermediate_dim = ( self.embed_dim * 4 if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim * 4 ) # Create Transformer Blocks self.blocks = nn.ModuleList( [ nn.ModuleList( [ IdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms), IdeficsMLP(self.intermediate_dim, config), ] ) for _ in range(depth) ] ) self.layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, context: torch.Tensor) -> torch.Tensor: """Resample arbitrary length context & *compress* down to self.n_latents latent embeddings""" # einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0]) latents = self.latents.repeat(context.shape[0], 1, 1) # Feed through Perceiver Attention blocks... for attn, ff in self.blocks: latents = attn(context, latents) + latents latents = ff(latents) + latents return self.layer_norm(latents) class IdeficsPerceiverAttention(nn.Module): def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None: """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`""" super().__init__() self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim self.qk_layer_norms = qk_layer_norms # Normalization & Scaling self.context_layer_norm = nn.LayerNorm(self.embed_dim) self.latents_layer_norm = nn.LayerNorm(self.embed_dim) if self.qk_layer_norms: self.q_layer_norm = nn.LayerNorm(self.head_dim) self.k_layer_norm = nn.LayerNorm(self.head_dim) self.qk_scale = self.head_dim**-0.5 # Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers). self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.output_proj = nn.Linear(self.n_heads * self.head_dim, embed_dim, bias=False) def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor: """ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! Args: context (`torch.Tensor`): Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample. latents (`torch.Tensor`): Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to. Returns: `torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross from context. """ context = self.context_layer_norm(context) latents = self.latents_layer_norm(latents) batch_size, seq_length, embed_dim = context.shape[:3] # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn! 
# Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents` q = self.q_proj(latents) k = self.k_proj(torch.cat([context, latents], dim=-2)) v = self.v_proj(torch.cat([context, latents], dim=-2)) # Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call) # =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)] # einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads) q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)] if self.qk_layer_norms: q = self.q_layer_norm(q) k = self.k_layer_norm(k) scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k) stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach()) attn = stabilized_scores.softmax(dim=-1) # Attend & project back to output... resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v) # einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads) return self.output_proj(resampled.transpose(1, 2).flatten(-2)) class IdeficsMLP(nn.Module): def __init__(self, intermediate_size, config: IdeficsConfig): """Simple MLP block with intermediate_size and embedding size""" super().__init__() self.embed_dim = config.vision_config.embed_dim self.ln = nn.LayerNorm(self.embed_dim) self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False) self.act = nn.ReLU() self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.ln(hidden_states) hidden_states = self.fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states
transformers/src/transformers/models/idefics/perceiver.py/0
{ "file_path": "transformers/src/transformers/models/idefics/perceiver.py", "repo_id": "transformers", "token_count": 3755 }
334
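The resampler above compresses an arbitrary-length `context` into a fixed set of latents by cross-attending from the latents to the concatenation `[context; latents]`. The sketch below exercises just the `IdeficsPerceiverAttention` block with random tensors to show the shape contract; note that `transformers.models.idefics.perceiver` is an internal module path, so treat the import as an assumption rather than public API:

```python
# Sketch: the resampler's cross-attention block in isolation.
import torch
from transformers.models.idefics.perceiver import IdeficsPerceiverAttention

attn = IdeficsPerceiverAttention(embed_dim=64, n_heads=4, head_dim=16, qk_layer_norms=True)

context = torch.randn(2, 257, 64)  # e.g. ViT patch embeddings: (bsz, seq, embed_dim)
latents = torch.randn(2, 16, 64)   # fixed number of latents to compress into

# Queries come from the latents; keys/values from torch.cat([context, latents], dim=-2).
out = attn(context, latents)
print(out.shape)  # torch.Size([2, 16, 64]) -- one output row per latent, not per context token
```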
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for LeViT.""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class LevitImageProcessor(BaseImageProcessor): r""" Constructs a LeViT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Wwhether to resize the shortest edge of the input to int(256/224 *`size`). Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to `(size["shortest_egde"] * height / width, size["shortest_egde"])`. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`List[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. 
Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`List[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. If size is a dict with keys "width" and "height", the image will be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to `(size["shortest_egde"] * height / width, size["shortest_egde"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to (size * height / width, size). resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size_dict = get_size_dict(size, default_to_square=False) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: shortest_edge = int((256 / 224) * size["shortest_edge"]) output_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format ) size_dict = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" ) return resize( image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Preprocess an image or batch of images to be used as input to a LeViT model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to (size * height / width, size). resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the output image after center cropping. Crops images to (crop_size["height"], crop_size["width"]). do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Factor to rescale the image pixel values by. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image pixel values by `image_mean` and `image_std`. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Mean to normalize the image pixel values by. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Standard deviation to normalize the image pixel values by. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. 
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
transformers/src/transformers/models/levit/image_processing_levit.py/0
{ "file_path": "transformers/src/transformers/models/levit/image_processing_levit.py", "repo_id": "transformers", "token_count": 6797 }
335
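End to end, the image processor above resizes the shortest edge to `int(256/224 * size["shortest_edge"])`, center-crops to 224x224, rescales pixel values to [0, 1], and normalizes with the ImageNet statistics. A minimal sketch with a dummy NumPy image (assuming `LevitImageProcessor` is available in the installed `transformers`):

```python
# Sketch: run the default LeViT preprocessing pipeline on a dummy image.
import numpy as np
from transformers import LevitImageProcessor

processor = LevitImageProcessor()  # shortest_edge=224 resize, 224x224 center crop by default

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 input
batch = processor(images=image, return_tensors="np")

print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop, channels first
```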
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Llava. """ from typing import List, Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class LlavaProcessor(ProcessorMixin): r""" Constructs a Llava processor which wraps a Llava image processor and a Llava tokenizer into a single processor. [`LlavaProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information. Args: image_processor ([`CLIPImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "CLIPImageProcessor" tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast") def __init__(self, image_processor=None, tokenizer=None): super().__init__(image_processor, tokenizer) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, images: ImageInput = None, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length=None, return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH, ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. 
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*): Activates truncation to cut input sequences longer than `max_length` to `max_length`. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if images is not None: pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"] else: pixel_values = None text_inputs = self.tokenizer( text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length ) return BatchFeature(data={**text_inputs, "pixel_values": pixel_values}) # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
transformers/src/transformers/models/llava/processing_llava.py/0
{ "file_path": "transformers/src/transformers/models/llava/processing_llava.py", "repo_id": "transformers", "token_count": 2756 }
336
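The processor above simply tokenizes `text` and, when `images` is given, attaches the CLIP image processor's `pixel_values` to the same `BatchFeature`. A hedged sketch of a typical call; the checkpoint name and the 336x336 resolution are assumptions about one particular released checkpoint, not something this file guarantees:

```python
# Sketch: pair a prompt with an image through LlavaProcessor.
# Assumes network access and the availability of the "llava-hf/llava-1.5-7b-hf" checkpoint.
from PIL import Image
from transformers import LlavaProcessor

processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")

image = Image.new("RGB", (336, 336), color="white")
prompt = "USER: <image>\nWhat is shown in this image? ASSISTANT:"

inputs = processor(text=prompt, images=image, return_tensors="pt")
print(inputs.keys())                # input_ids, attention_mask, pixel_values
print(inputs["pixel_values"].shape) # e.g. torch.Size([1, 3, 336, 336]) for a CLIP ViT-L/14-336 backbone
```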
# coding=utf-8 # Copyright Studio Ousia and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LUKE model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_luke import LukeConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LukeConfig" _CHECKPOINT_FOR_DOC = "studio-ousia/luke-base" LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "studio-ousia/luke-base", "studio-ousia/luke-large", # See all LUKE models at https://huggingface.co/models?filter=luke ] @dataclass class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): """ Base class for outputs of the LUKE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseLukeModelOutput(BaseModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ entity_last_hidden_state: torch.FloatTensor = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class LukeMaskedLMOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): The sum of masked language modeling (MLM) loss and entity prediction loss. mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked entity prediction (MEP) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None mlm_loss: Optional[torch.FloatTensor] = None mep_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None entity_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class EntityClassificationOutput(ModelOutput): """ Outputs of entity classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class EntityPairClassificationOutput(ModelOutput): """ Outputs of entity pair classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification scores (before SoftMax). 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class EntitySpanClassificationOutput(ModelOutput): """ Outputs of entity span classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class LukeSequenceClassifierOutput(ModelOutput): """ Outputs of sentence classification models. 
Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class LukeTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class LukeQuestionAnsweringModelOutput(ModelOutput): """ Outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class LukeMultipleChoiceModelOutput(ModelOutput): """ Outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. 
Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None entity_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class LukeEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)


class LukeEntityEmbeddings(nn.Module):
    def __init__(self, config: LukeConfig):
        super().__init__()
        self.config = config

        self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
        if config.entity_emb_size != config.hidden_size:
            self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None
    ):
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(entity_ids)

        entity_embeddings = self.entity_embeddings(entity_ids)
        if self.config.entity_emb_size != self.config.hidden_size:
            entity_embeddings = self.entity_embedding_dense(entity_embeddings)

        position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
        position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
        position_embeddings = position_embeddings * position_embedding_mask
        position_embeddings = torch.sum(position_embeddings, dim=-2)
        position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)

        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = entity_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings


class LukeSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.use_entity_aware_attention = config.use_entity_aware_attention self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) if self.use_entity_aware_attention: self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size) self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) if entity_hidden_states is None: concat_hidden_states = word_hidden_states else: concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) key_layer = self.transpose_for_scores(self.key(concat_hidden_states)) value_layer = self.transpose_for_scores(self.value(concat_hidden_states)) if self.use_entity_aware_attention and entity_hidden_states is not None: # compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e) # query layers w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states)) w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states)) e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states)) e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states)) # compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above w2w_key_layer = key_layer[:, :, :word_size, :] e2w_key_layer = key_layer[:, :, :word_size, :] w2e_key_layer = key_layer[:, :, word_size:, :] e2e_key_layer = key_layer[:, :, word_size:, :] # compute attention scores based on the dot product between the query and key vectors w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2)) w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2)) e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2)) e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2)) # combine attention scores to create the final attention score matrix word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3) entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3) attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2) else: query_layer = self.transpose_for_scores(self.query(concat_hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in LukeModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
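        # Shape note (illustrative): with W word tokens and E entity tokens (E = 0 when `entity_hidden_states` is
        # None), `attention_scores` has shape (batch_size, num_heads, W + E, W + E), where the first W rows/columns
        # correspond to word tokens and the remaining E to entity tokens. The single softmax below therefore
        # normalizes word and entity attention jointly, and positions masked by the additive `attention_mask`
        # (built in `LukeModel.get_extended_attention_mask` with large negative values) receive near-zero probability.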
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) output_word_hidden_states = context_layer[:, :word_size, :] if entity_hidden_states is None: output_entity_hidden_states = None else: output_entity_hidden_states = context_layer[:, word_size:, :] if output_attentions: outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs) else: outputs = (output_word_hidden_states, output_entity_hidden_states) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class LukeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LukeSelfAttention(config) self.output = LukeSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError("LUKE does not support the pruning of attention heads") def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_outputs = self.self( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions, ) if entity_hidden_states is None: concat_self_outputs = self_outputs[0] concat_hidden_states = word_hidden_states else: concat_self_outputs = torch.cat(self_outputs[:2], dim=1) concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1) attention_output = self.output(concat_self_outputs, concat_hidden_states) word_attention_output = attention_output[:, :word_size, :] if entity_hidden_states is None: entity_attention_output = None else: entity_attention_output = attention_output[:, word_size:, :] # add attentions if we output them outputs = (word_attention_output, entity_attention_output) + self_outputs[2:] return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class LukeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class LukeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = 
nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LukeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LukeAttention(config) self.intermediate = LukeIntermediate(config) self.output = LukeOutput(config) def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): word_size = word_hidden_states.size(1) self_attention_outputs = self.attention( word_hidden_states, entity_hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) if entity_hidden_states is None: concat_attention_output = self_attention_outputs[0] else: concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1) outputs = self_attention_outputs[2:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output ) word_layer_output = layer_output[:, :word_size, :] if entity_hidden_states is None: entity_layer_output = None else: entity_layer_output = layer_output[:, word_size:, :] outputs = (word_layer_output, entity_layer_output) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class LukeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, word_hidden_states, entity_hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_word_hidden_states = () if output_hidden_states else None all_entity_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module( word_hidden_states, entity_hidden_states, attention_mask, layer_head_mask, output_attentions, ) word_hidden_states = layer_outputs[0] if entity_hidden_states is not None: entity_hidden_states = layer_outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_word_hidden_states = all_word_hidden_states + (word_hidden_states,) all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,) if not return_dict: return tuple( v for v in [ word_hidden_states, all_word_hidden_states, 
all_self_attentions, entity_hidden_states, all_entity_hidden_states, ] if v is not None ) return BaseLukeModelOutput( last_hidden_state=word_hidden_states, hidden_states=all_word_hidden_states, attentions=all_self_attentions, entity_last_hidden_state=entity_hidden_states, entity_hidden_states=all_entity_hidden_states, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class LukePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class EntityPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.entity_emb_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class EntityPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.transform = EntityPredictionHeadTransform(config) self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class LukePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LukeConfig base_model_prefix = "luke" supports_gradient_checkpointing = True _no_split_modules = ["LukeAttention", "LukeEntityEmbeddings"] def _init_weights(self, module: nn.Module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if module.embedding_dim == 1: # embedding for bias parameters module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) LUKE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LukeConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LUKE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`): Indices of entity tokens in the entity vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*): Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`: - 1 for entity tokens that are **not masked**, - 0 for entity tokens that are **masked**. entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Segment token indices to indicate first and second portions of the entity token inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *portion A* entity token, - 1 corresponds to a *portion B* entity token. entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*): Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any" " specific head on top.", LUKE_START_DOCSTRING, ) class LukeModel(LukePreTrainedModel): def __init__(self, config: LukeConfig, add_pooling_layer: bool = True): super().__init__(config) self.config = config self.embeddings = LukeEmbeddings(config) self.entity_embeddings = LukeEntityEmbeddings(config) self.encoder = LukeEncoder(config) self.pooler = LukePooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_entity_embeddings(self): return self.entity_embeddings.entity_embeddings def set_entity_embeddings(self, value): self.entity_embeddings.entity_embeddings = value def _prune_heads(self, heads_to_prune): raise NotImplementedError("LUKE does not support the pruning of attention heads") @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseLukeModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeModel >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base") >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Input Wikipedia entities to obtain enriched contextualized representations of word tokens >>> text = "Beyoncé lives in Los Angeles." >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [ ... (0, 7), ... (17, 28), ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> encoding = tokenizer( ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt" ... 
) >>> outputs = model(**encoding) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if entity_ids is not None: entity_seq_length = entity_ids.size(1) if entity_attention_mask is None: entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device) if entity_token_type_ids is None: entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # First, compute word embeddings word_embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) # Second, compute extended attention mask extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask) # Third, compute entity embeddings and concatenate with word embeddings if entity_ids is None: entity_embedding_output = None else: entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids) # Fourth, send embeddings through the model encoder_outputs = self.encoder( word_embedding_output, entity_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Fifth, get the output. 
LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size) sequence_output = encoder_outputs[0] # Sixth, we compute the pooled_output, word_sequence_output and entity_sequence_output based on the sequence_output pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseLukeModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, entity_last_hidden_state=encoder_outputs.entity_last_hidden_state, entity_hidden_states=encoder_outputs.entity_hidden_states, ) def get_extended_attention_mask( self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor] ): """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: word_attention_mask (`torch.LongTensor`): Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore. entity_attention_mask (`torch.LongTensor`, *optional*): Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ attention_mask = word_attention_mask if entity_attention_mask is not None: attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1) if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})") extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min return extended_attention_mask def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. 
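    # Illustrative example, assuming padding_idx = 1:
    #   input_ids                   = [[5, 7, 9, 1, 1]]
    #   mask                        = [[1, 1, 1, 0, 0]]
    #   cumsum(mask, dim=1) * mask  = [[1, 2, 3, 0, 0]]
    #   returned position ids       = [[2, 3, 4, 1, 1]]
    # i.e. real tokens are numbered from padding_idx + 1 onwards while padding positions stay at padding_idx.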
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead class LukeLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings( """ The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and masked entity prediction. """, LUKE_START_DOCSTRING, ) class LukeForMaskedLM(LukePreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight"] def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.lm_head = LukeLMHead(config) self.entity_predictions = EntityPredictionHead(config) self.loss_fn = nn.CrossEntropyLoss() # Initialize weights and apply final processing self.post_init() def tie_weights(self): super().tie_weights() self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings) def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, entity_labels: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) loss = None mlm_loss = None logits = self.lm_head(outputs.last_hidden_state) if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1)) if loss is None: loss = mlm_loss mep_loss = None entity_logits = None if outputs.entity_last_hidden_state is not None: entity_logits = self.entity_predictions(outputs.entity_last_hidden_state) if entity_labels is not None: mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1)) if loss is None: loss = mep_loss else: loss = loss + mep_loss if not return_dict: return tuple( v for v in [ loss, mlm_loss, mep_loss, logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMaskedLMOutput( loss=loss, mlm_loss=mlm_loss, mep_loss=mep_loss, logits=logits, entity_logits=entity_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity token) for entity classification tasks, such as Open Entity. """, LUKE_START_DOCSTRING, ) class LukeForEntityClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. 
If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeForEntityClassification >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: person ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = outputs.entity_last_hidden_state[:, 0, :] feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity tokens) for entity pair classification tasks, such as TACRED. 
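
    The head expects exactly two entity spans per example: its input is the concatenation of the two entity
    representations. A minimal, self-contained sketch of that step (it mirrors the `forward` pass below; tensor
    sizes are illustrative, not taken from a real checkpoint):

    ```python
    import torch
    from torch import nn

    batch_size, hidden_size, num_labels = 2, 768, 42
    entity_last_hidden_state = torch.randn(batch_size, 2, hidden_size)  # two entity spans per example
    classifier = nn.Linear(hidden_size * 2, num_labels, bias=False)

    feature_vector = torch.cat(
        [entity_last_hidden_state[:, 0, :], entity_last_hidden_state[:, 1, :]], dim=1
    )  # (batch_size, 2 * hidden_size)
    logits = classifier(feature_vector)  # (batch_size, num_labels)
    ```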
""", LUKE_START_DOCSTRING, ) class LukeForEntityPairClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntityPairClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeForEntityPairClassification >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [ ... (0, 7), ... (17, 28), ... 
] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) Predicted class: per:cities_of_residence ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) feature_vector = torch.cat( [outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1 ) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if labels.ndim == 1: loss = nn.functional.cross_entropy(logits, labels) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntityPairClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks such as named entity recognition. 
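
    For every candidate span, the classifier scores the concatenation of three vectors: the hidden state of the
    span's first word token, the hidden state of its last word token, and the corresponding entity representation.
    A minimal, self-contained sketch of that gather-and-concatenate step (it mirrors the `forward` pass below;
    sizes are illustrative):

    ```python
    import torch
    from torch import nn

    batch_size, seq_len, num_spans, hidden_size, num_labels = 2, 16, 5, 768, 9
    last_hidden_state = torch.randn(batch_size, seq_len, hidden_size)
    entity_last_hidden_state = torch.randn(batch_size, num_spans, hidden_size)
    entity_start_positions = torch.randint(0, seq_len, (batch_size, num_spans))
    entity_end_positions = torch.randint(0, seq_len, (batch_size, num_spans))

    start_index = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
    end_index = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
    start_states = torch.gather(last_hidden_state, -2, start_index)
    end_states = torch.gather(last_hidden_state, -2, end_index)

    feature_vector = torch.cat([start_states, end_states, entity_last_hidden_state], dim=2)
    logits = nn.Linear(hidden_size * 3, num_labels)(feature_vector)  # (batch_size, num_spans, num_labels)
    ```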
""", LUKE_START_DOCSTRING, ) class LukeForEntitySpanClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.num_labels = config.num_labels self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.LongTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, entity_start_positions: Optional[torch.LongTensor] = None, entity_end_positions: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, EntitySpanClassificationOutput]: r""" entity_start_positions (`torch.LongTensor`): The start positions of entities in the word token sequence. entity_end_positions (`torch.LongTensor`): The end positions of entities in the word token sequence. labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*): Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross entropy loss is used for the single-label classification. In this case, labels should contain the indices that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively. Returns: Examples: ```python >>> from transformers import AutoTokenizer, LukeForEntitySpanClassification >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003") >>> text = "Beyoncé lives in Los Angeles" # List all possible entity spans in the text >>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens >>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens >>> entity_spans = [] >>> for i, start_pos in enumerate(word_start_positions): ... for end_pos in word_end_positions[i:]: ... entity_spans.append((start_pos, end_pos)) >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist() >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices): ... if predicted_class_idx != 0: ... 
print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx]) Beyoncé PER Los Angeles LOC ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) hidden_size = outputs.last_hidden_state.size(-1) entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_start_positions.device != outputs.last_hidden_state.device: entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device) start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions) entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size) if entity_end_positions.device != outputs.last_hidden_state.device: entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device) end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions) feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2) feature_vector = self.dropout(feature_vector) logits = self.classifier(feature_vector) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) # When the number of dimension of `labels` is 2, cross entropy is used as the loss function. The binary # cross entropy is used otherwise. if labels.ndim == 2: loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1)) else: loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return EntitySpanClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", LUKE_START_DOCSTRING, ) class LukeForSequenceClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this class. 
""", LUKE_START_DOCSTRING, ) class LukeForTokenClassification(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeTokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: return tuple( v for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions] if v is not None ) return LukeTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", LUKE_START_DOCSTRING, ) class LukeForQuestionAnswering(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.luke = LukeModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.FloatTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs.last_hidden_state logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: return tuple( v for v in [ total_loss, start_logits, end_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The LUKE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", LUKE_START_DOCSTRING, ) class LukeForMultipleChoice(LukePreTrainedModel): def __init__(self, config): super().__init__(config) self.luke = LukeModel(config) self.dropout = nn.Dropout( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LukeMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, entity_ids: Optional[torch.LongTensor] = None, entity_attention_mask: Optional[torch.FloatTensor] = None, entity_token_type_ids: Optional[torch.LongTensor] = None, entity_position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, LukeMultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None entity_attention_mask = ( entity_attention_mask.view(-1, entity_attention_mask.size(-1)) if entity_attention_mask is not None else None ) entity_token_type_ids = ( entity_token_type_ids.view(-1, entity_token_type_ids.size(-1)) if entity_token_type_ids is not None else None ) entity_position_ids = ( entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1)) if entity_position_ids is not None else None ) outputs = self.luke( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) pooled_output = outputs.pooler_output pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not 
None: # move labels to correct device to enable model parallelism labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: return tuple( v for v in [ loss, reshaped_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions, ] if v is not None ) return LukeMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, )
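# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how the extractive-QA head defined above is typically driven.
# The checkpoint name "studio-ousia/luke-base" and the question/context strings are
# assumptions for illustration only; the base checkpoint carries no fine-tuned QA head, so
# the decoded span is not meaningful. The snippet only shows how `start_logits` and
# `end_logits` are consumed.
if __name__ == "__main__":
    import torch

    from transformers import AutoTokenizer, LukeForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
    model = LukeForQuestionAnswering.from_pretrained("studio-ousia/luke-base")

    question = "Who proposed LUKE?"
    context = "LUKE was proposed by Yamada et al. in 2020."
    inputs = tokenizer(question, context, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    # Greedy span decoding: argmax over the start and end logits, then decode the span.
    start_index = int(outputs.start_logits.argmax(dim=-1))
    end_index = int(outputs.end_logits.argmax(dim=-1))
    print(tokenizer.decode(inputs["input_ids"][0, start_index : end_index + 1]))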
transformers/src/transformers/models/luke/modeling_luke.py/0
{ "file_path": "transformers/src/transformers/models/luke/modeling_luke.py", "repo_id": "transformers", "token_count": 43009 }
337
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import datetime import json import os import re from pathlib import Path from typing import Tuple import yaml from tqdm import tqdm from transformers.models.marian.convert_marian_to_pytorch import ( FRONT_MATTER_TEMPLATE, convert, convert_opus_name_to_hf_name, download_and_unzip, get_system_metadata, ) DEFAULT_REPO = "Tatoeba-Challenge" DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models") LANG_CODE_URL = "https://datahub.io/core/language-codes/r/language-codes-3b2.csv" ISO_URL = "https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv" ISO_PATH = "lang_code_data/iso-639-3.csv" LANG_CODE_PATH = "lang_code_data/language-codes-3b2.csv" TATOEBA_MODELS_URL = "https://object.pouta.csc.fi/Tatoeba-MT-models" class TatoebaConverter: """ Convert Tatoeba-Challenge models to huggingface format. Steps: 1. Convert numpy state dict to hf format (same code as OPUS-MT-Train conversion). 2. Rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a unique one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en 3. Select the best model for a particular pair, parse the yml for it and write a model card. By default the best model is the one listed first in released-model-results, but it's also possible to specify the most recent one. 
""" def __init__(self, save_dir="marian_converted"): assert Path(DEFAULT_REPO).exists(), "need git clone [email protected]:Helsinki-NLP/Tatoeba-Challenge.git" self.download_lang_info() self.model_results = json.load(open("Tatoeba-Challenge/models/released-model-results.json")) self.alpha3_to_alpha2 = {} for line in open(ISO_PATH): parts = line.split("\t") if len(parts[0]) == 3 and len(parts[3]) == 2: self.alpha3_to_alpha2[parts[0]] = parts[3] for line in LANG_CODE_PATH: parts = line.split(",") if len(parts[0]) == 3 and len(parts[1]) == 2: self.alpha3_to_alpha2[parts[0]] = parts[1] self.model_card_dir = Path(save_dir) self.tag2name = {} for key, value in GROUP_MEMBERS.items(): self.tag2name[key] = value[0] def convert_models(self, tatoeba_ids, dry_run=False): models_to_convert = [self.parse_metadata(x) for x in tatoeba_ids] save_dir = Path("marian_ckpt") dest_dir = Path(self.model_card_dir) dest_dir.mkdir(exist_ok=True) for model in tqdm(models_to_convert): # k, prepro, download, test_set_url in tqdm(model_list): if "SentencePiece" not in model["pre-processing"]: print(f"Skipping {model['release']} because it doesn't appear to use SentencePiece") continue if not os.path.exists(save_dir / model["_name"]): download_and_unzip(f"{TATOEBA_MODELS_URL}/{model['release']}", save_dir / model["_name"]) # from convert_marian_to_pytorch opus_language_groups_to_hf = convert_opus_name_to_hf_name pair_name = opus_language_groups_to_hf(model["_name"]) convert(save_dir / model["_name"], dest_dir / f"opus-mt-{pair_name}") self.write_model_card(model, dry_run=dry_run) def expand_group_to_two_letter_codes(self, grp_name): return [self.alpha3_to_alpha2.get(x, x) for x in GROUP_MEMBERS[grp_name][1]] def is_group(self, code, name): return "languages" in name or len(GROUP_MEMBERS.get(code, [])) > 1 def get_tags(self, code, name): if len(code) == 2: assert "languages" not in name, f"{code}: {name}" return [code] elif self.is_group(code, name): group = self.expand_group_to_two_letter_codes(code) group.append(code) return group else: # zho-> zh print(f"Three letter monolingual code: {code}") return [code] def resolve_lang_code(self, src, tgt) -> Tuple[str, str]: src_tags = self.get_tags(src, self.tag2name[src]) tgt_tags = self.get_tags(tgt, self.tag2name[tgt]) return src_tags, tgt_tags @staticmethod def model_type_info_from_model_name(name): info = {"_has_backtranslated_data": False} if "1m" in name: info["_data_per_pair"] = str(1e6) if "2m" in name: info["_data_per_pair"] = str(2e6) if "4m" in name: info["_data_per_pair"] = str(4e6) if "+bt" in name: info["_has_backtranslated_data"] = True if "tuned4" in name: info["_tuned"] = re.search(r"tuned4[^-]+", name).group() return info def write_model_card(self, model_dict, dry_run=False) -> str: """ Construct card from data parsed from YAML and the model's name. 
upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun """ model_dir_url = f"{TATOEBA_MODELS_URL}/{model_dict['release']}" long_pair = model_dict["_name"].split("-") assert len(long_pair) == 2, f"got a translation pair {model_dict['_name']} that doesn't appear to be a pair" short_src = self.alpha3_to_alpha2.get(long_pair[0], long_pair[0]) short_tgt = self.alpha3_to_alpha2.get(long_pair[1], long_pair[1]) model_dict["_hf_model_id"] = f"opus-mt-{short_src}-{short_tgt}" a3_src, a3_tgt = model_dict["_name"].split("-") # opus_src_tags, opus_tgt_tags = a3_src.split("+"), a3_tgt.split("+") # This messy part tries to deal with language tags in multilingual models, possibly # not all having three-letter codes resolved_src_tags, resolved_tgt_tags = self.resolve_lang_code(a3_src, a3_tgt) a2_src_tags, a2_tgt_tags = [], [] for tag in resolved_src_tags: if tag not in self.alpha3_to_alpha2: a2_src_tags.append(tag) for tag in resolved_tgt_tags: if tag not in self.alpha3_to_alpha2: a2_tgt_tags.append(tag) lang_tags = dedup(a2_src_tags + a2_tgt_tags) src_multilingual, tgt_multilingual = (len(a2_src_tags) > 1), (len(a2_tgt_tags) > 1) s, t = ",".join(a2_src_tags), ",".join(a2_tgt_tags) metadata = { "hf_name": model_dict["_name"], "source_languages": s, "target_languages": t, "opus_readme_url": f"{model_dir_url}/README.md", "original_repo": "Tatoeba-Challenge", "tags": ["translation"], "languages": lang_tags, } lang_tags = l2front_matter(lang_tags) metadata["src_constituents"] = list(GROUP_MEMBERS[a3_src][1]) metadata["tgt_constituents"] = list(GROUP_MEMBERS[a3_tgt][1]) metadata["src_multilingual"] = src_multilingual metadata["tgt_multilingual"] = tgt_multilingual backtranslated_data = "" if model_dict["_has_backtranslated_data"]: backtranslated_data = " with backtranslations" multilingual_data = "" if "_data_per_pair" in model_dict: multilingual_data = f"* data per pair in multilingual model: {model_dict['_data_per_pair']}\n" tuned = "" if "_tuned" in model_dict: tuned = f"* multilingual model tuned for: {model_dict['_tuned']}\n" model_base_filename = model_dict["release"].split("/")[-1] download = f"* download original weights: [{model_base_filename}]({model_dir_url}/{model_dict['release']})\n" langtoken = "" if tgt_multilingual: langtoken = ( "* a sentence-initial language token is required in the form of >>id<<" "(id = valid, usually three-letter target language ID)\n" ) metadata.update(get_system_metadata(DEFAULT_REPO)) scorestable = "" for k, v in model_dict.items(): if "scores" in k: this_score_table = f"* {k}\n|Test set|score|\n|---|---|\n" pairs = sorted(v.items(), key=lambda x: x[1], reverse=True) for pair in pairs: this_score_table += f"|{pair[0]}|{pair[1]}|\n" scorestable += this_score_table datainfo = "" if "training-data" in model_dict: datainfo += "* Training data: \n" for k, v in model_dict["training-data"].items(): datainfo += f" * {str(k)}: {str(v)}\n" if "validation-data" in model_dict: datainfo += "* Validation data: \n" for k, v in model_dict["validation-data"].items(): datainfo += f" * {str(k)}: {str(v)}\n" if "test-data" in model_dict: datainfo += "* Test data: \n" for k, v in model_dict["test-data"].items(): datainfo += f" * {str(k)}: {str(v)}\n" testsetfilename = model_dict["release"].replace(".zip", ".test.txt") testscoresfilename = model_dict["release"].replace(".zip", ".eval.txt") testset = f"* test set translations file: [test.txt]({model_dir_url}/{testsetfilename})\n" testscores = f"* test set scores file: 
[eval.txt]({model_dir_url}/{testscoresfilename})\n" # combine with Tatoeba markdown readme_url = f"{TATOEBA_MODELS_URL}/{model_dict['_name']}/README.md" extra_markdown = f""" ### {model_dict['_name']} * source language name: {self.tag2name[a3_src]} * target language name: {self.tag2name[a3_tgt]} * OPUS readme: [README.md]({readme_url}) """ content = ( f""" * model: {model_dict['modeltype']} * source language code{src_multilingual*'s'}: {', '.join(a2_src_tags)} * target language code{tgt_multilingual*'s'}: {', '.join(a2_tgt_tags)} * dataset: opus {backtranslated_data} * release date: {model_dict['release-date']} * pre-processing: {model_dict['pre-processing']} """ + multilingual_data + tuned + download + langtoken + datainfo + testset + testscores + scorestable ) content = FRONT_MATTER_TEMPLATE.format(lang_tags) + extra_markdown + content items = "\n".join([f"* {k}: {v}" for k, v in metadata.items()]) sec3 = "\n### System Info: \n" + items content += sec3 if dry_run: print("CONTENT:") print(content) print("METADATA:") print(metadata) return sub_dir = self.model_card_dir / model_dict["_hf_model_id"] sub_dir.mkdir(exist_ok=True) dest = sub_dir / "README.md" dest.open("w").write(content) for k, v in metadata.items(): if isinstance(v, datetime.date): metadata[k] = datetime.datetime.strftime(v, "%Y-%m-%d") with open(sub_dir / "metadata.json", "w", encoding="utf-8") as writeobj: json.dump(metadata, writeobj) def download_lang_info(self): Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True) import wget if not os.path.exists(ISO_PATH): wget.download(ISO_URL, ISO_PATH) if not os.path.exists(LANG_CODE_PATH): wget.download(LANG_CODE_URL, LANG_CODE_PATH) def parse_metadata(self, model_name, repo_path=DEFAULT_MODEL_DIR, method="best"): p = Path(repo_path) / model_name def url_to_name(url): return url.split("/")[-1].split(".")[0] if model_name not in self.model_results: # This is not a language pair, so model results are ambiguous, go by newest method = "newest" if method == "best": # Sort by how early they appear in released-models-results results = [url_to_name(model["download"]) for model in self.model_results[model_name]] ymls = [f for f in os.listdir(p) if f.endswith(".yml") and f[:-4] in results] ymls.sort(key=lambda x: results.index(x[:-4])) metadata = yaml.safe_load(open(p / ymls[0])) metadata.update(self.model_type_info_from_model_name(ymls[0][:-4])) elif method == "newest": ymls = [f for f in os.listdir(p) if f.endswith(".yml")] # Sort by date ymls.sort( key=lambda x: datetime.datetime.strptime(re.search(r"\d\d\d\d-\d\d?-\d\d?", x).group(), "%Y-%m-%d") ) metadata = yaml.safe_load(open(p / ymls[-1])) metadata.update(self.model_type_info_from_model_name(ymls[-1][:-4])) else: raise NotImplementedError(f"Don't know argument method='{method}' to parse_metadata()") metadata["_name"] = model_name return metadata GROUP_MEMBERS = { # three letter code -> (group/language name, {constituents...} # if this language is on the target side the constituents can be used as target language codes. # if the language is on the source side they are supported natively without special codes. 
"aav": ("Austro-Asiatic languages", {"hoc", "hoc_Latn", "kha", "khm", "khm_Latn", "mnw", "vie", "vie_Hani"}), "afa": ( "Afro-Asiatic languages", { "acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "hau_Latn", "heb", "kab", "mlt", "rif_Latn", "shy_Latn", "som", "thv", "tir", }, ), "afr": ("Afrikaans", {"afr"}), "alv": ( "Atlantic-Congo languages", { "ewe", "fuc", "fuv", "ibo", "kin", "lin", "lug", "nya", "run", "sag", "sna", "swh", "toi_Latn", "tso", "umb", "wol", "xho", "yor", "zul", }, ), "ara": ("Arabic", {"afb", "apc", "apc_Latn", "ara", "ara_Latn", "arq", "arq_Latn", "arz"}), "art": ( "Artificial languages", { "afh_Latn", "avk_Latn", "dws_Latn", "epo", "ido", "ido_Latn", "ile_Latn", "ina_Latn", "jbo", "jbo_Cyrl", "jbo_Latn", "ldn_Latn", "lfn_Cyrl", "lfn_Latn", "nov_Latn", "qya", "qya_Latn", "sjn_Latn", "tlh_Latn", "tzl", "tzl_Latn", "vol_Latn", }, ), "aze": ("Azerbaijani", {"aze_Latn"}), "bat": ("Baltic languages", {"lit", "lav", "prg_Latn", "ltg", "sgs"}), "bel": ("Belarusian", {"bel", "bel_Latn"}), "ben": ("Bengali", {"ben"}), "bnt": ( "Bantu languages", {"kin", "lin", "lug", "nya", "run", "sna", "swh", "toi_Latn", "tso", "umb", "xho", "zul"}, ), "bul": ("Bulgarian", {"bul", "bul_Latn"}), "cat": ("Catalan", {"cat"}), "cau": ("Caucasian languages", {"abk", "kat", "che", "ady"}), "ccs": ("South Caucasian languages", {"kat"}), "ceb": ("Cebuano", {"ceb"}), "cel": ("Celtic languages", {"gla", "gle", "bre", "cor", "glv", "cym"}), "ces": ("Czech", {"ces"}), "cpf": ("Creoles and pidgins, French‑based", {"gcf_Latn", "hat", "mfe"}), "cpp": ( "Creoles and pidgins, Portuguese-based", {"zsm_Latn", "ind", "pap", "min", "tmw_Latn", "max_Latn", "zlm_Latn"}, ), "cus": ("Cushitic languages", {"som"}), "dan": ("Danish", {"dan"}), "deu": ("German", {"deu"}), "dra": ("Dravidian languages", {"tam", "kan", "mal", "tel"}), "ell": ("Modern Greek (1453-)", {"ell"}), "eng": ("English", {"eng"}), "epo": ("Esperanto", {"epo"}), "est": ("Estonian", {"est"}), "euq": ("Basque (family)", {"eus"}), "eus": ("Basque", {"eus"}), "fin": ("Finnish", {"fin"}), "fiu": ( "Finno-Ugrian languages", { "est", "fin", "fkv_Latn", "hun", "izh", "kpv", "krl", "liv_Latn", "mdf", "mhr", "myv", "sma", "sme", "udm", "vep", "vro", }, ), "fra": ("French", {"fra"}), "gem": ( "Germanic languages", { "afr", "ang_Latn", "dan", "deu", "eng", "enm_Latn", "fao", "frr", "fry", "gos", "got_Goth", "gsw", "isl", "ksh", "ltz", "nds", "nld", "nno", "nob", "nob_Hebr", "non_Latn", "pdc", "sco", "stq", "swe", "swg", "yid", }, ), "gle": ("Irish", {"gle"}), "glg": ("Galician", {"glg"}), "gmq": ("North Germanic languages", {"dan", "nob", "nob_Hebr", "swe", "isl", "nno", "non_Latn", "fao"}), "gmw": ( "West Germanic languages", { "afr", "ang_Latn", "deu", "eng", "enm_Latn", "frr", "fry", "gos", "gsw", "ksh", "ltz", "nds", "nld", "pdc", "sco", "stq", "swg", "yid", }, ), "grk": ("Greek languages", {"grc_Grek", "ell"}), "hbs": ("Serbo-Croatian", {"hrv", "srp_Cyrl", "bos_Latn", "srp_Latn"}), "heb": ("Hebrew", {"heb"}), "hin": ("Hindi", {"hin"}), "hun": ("Hungarian", {"hun"}), "hye": ("Armenian", {"hye", "hye_Latn"}), "iir": ( "Indo-Iranian languages", { "asm", "awa", "ben", "bho", "gom", "guj", "hif_Latn", "hin", "jdt_Cyrl", "kur_Arab", "kur_Latn", "mai", "mar", "npi", "ori", "oss", "pan_Guru", "pes", "pes_Latn", "pes_Thaa", "pnb", "pus", "rom", "san_Deva", "sin", "snd_Arab", "tgk_Cyrl", "tly_Latn", "urd", "zza", }, ), "ilo": ("Iloko", {"ilo"}), "inc": ( "Indic languages", { "asm", "awa", "ben", "bho", "gom", "guj", "hif_Latn", "hin", "mai", "mar", "npi", 
"ori", "pan_Guru", "pnb", "rom", "san_Deva", "sin", "snd_Arab", "urd", }, ), "ine": ( "Indo-European languages", { "afr", "afr_Arab", "aln", "ang_Latn", "arg", "asm", "ast", "awa", "bel", "bel_Latn", "ben", "bho", "bjn", "bos_Latn", "bre", "bul", "bul_Latn", "cat", "ces", "cor", "cos", "csb_Latn", "cym", "dan", "deu", "dsb", "egl", "ell", "eng", "enm_Latn", "ext", "fao", "fra", "frm_Latn", "frr", "fry", "gcf_Latn", "gla", "gle", "glg", "glv", "gom", "gos", "got_Goth", "grc_Grek", "gsw", "guj", "hat", "hif_Latn", "hin", "hrv", "hsb", "hye", "hye_Latn", "ind", "isl", "ita", "jdt_Cyrl", "ksh", "kur_Arab", "kur_Latn", "lad", "lad_Latn", "lat_Grek", "lat_Latn", "lav", "lij", "lit", "lld_Latn", "lmo", "ltg", "ltz", "mai", "mar", "max_Latn", "mfe", "min", "mkd", "mwl", "nds", "nld", "nno", "nob", "nob_Hebr", "non_Latn", "npi", "oci", "ori", "orv_Cyrl", "oss", "pan_Guru", "pap", "pcd", "pdc", "pes", "pes_Latn", "pes_Thaa", "pms", "pnb", "pol", "por", "prg_Latn", "pus", "roh", "rom", "ron", "rue", "rus", "rus_Latn", "san_Deva", "scn", "sco", "sgs", "sin", "slv", "snd_Arab", "spa", "sqi", "srd", "srp_Cyrl", "srp_Latn", "stq", "swe", "swg", "tgk_Cyrl", "tly_Latn", "tmw_Latn", "ukr", "urd", "vec", "wln", "yid", "zlm_Latn", "zsm_Latn", "zza", }, ), "isl": ("Icelandic", {"isl"}), "ita": ("Italian", {"ita"}), "itc": ( "Italic languages", { "arg", "ast", "bjn", "cat", "cos", "egl", "ext", "fra", "frm_Latn", "gcf_Latn", "glg", "hat", "ind", "ita", "lad", "lad_Latn", "lat_Grek", "lat_Latn", "lij", "lld_Latn", "lmo", "max_Latn", "mfe", "min", "mwl", "oci", "pap", "pcd", "pms", "por", "roh", "ron", "scn", "spa", "srd", "tmw_Latn", "vec", "wln", "zlm_Latn", "zsm_Latn", }, ), "jpn": ("Japanese", {"jpn", "jpn_Bopo", "jpn_Hang", "jpn_Hani", "jpn_Hira", "jpn_Kana", "jpn_Latn", "jpn_Yiii"}), "jpx": ("Japanese (family)", {"jpn"}), "kat": ("Georgian", {"kat"}), "kor": ("Korean", {"kor_Hani", "kor_Hang", "kor_Latn", "kor"}), "lav": ("Latvian", {"lav"}), "lit": ("Lithuanian", {"lit"}), "mkd": ("Macedonian", {"mkd"}), "mkh": ("Mon-Khmer languages", {"vie_Hani", "mnw", "vie", "kha", "khm_Latn", "khm"}), "msa": ("Malay (macrolanguage)", {"zsm_Latn", "ind", "max_Latn", "zlm_Latn", "min"}), "mul": ( "Multiple languages", { "abk", "acm", "ady", "afb", "afh_Latn", "afr", "akl_Latn", "aln", "amh", "ang_Latn", "apc", "ara", "arg", "arq", "ary", "arz", "asm", "ast", "avk_Latn", "awa", "aze_Latn", "bak", "bam_Latn", "bel", "bel_Latn", "ben", "bho", "bod", "bos_Latn", "bre", "brx", "brx_Latn", "bul", "bul_Latn", "cat", "ceb", "ces", "cha", "che", "chr", "chv", "cjy_Hans", "cjy_Hant", "cmn", "cmn_Hans", "cmn_Hant", "cor", "cos", "crh", "crh_Latn", "csb_Latn", "cym", "dan", "deu", "dsb", "dtp", "dws_Latn", "egl", "ell", "enm_Latn", "epo", "est", "eus", "ewe", "ext", "fao", "fij", "fin", "fkv_Latn", "fra", "frm_Latn", "frr", "fry", "fuc", "fuv", "gan", "gcf_Latn", "gil", "gla", "gle", "glg", "glv", "gom", "gos", "got_Goth", "grc_Grek", "grn", "gsw", "guj", "hat", "hau_Latn", "haw", "heb", "hif_Latn", "hil", "hin", "hnj_Latn", "hoc", "hoc_Latn", "hrv", "hsb", "hun", "hye", "iba", "ibo", "ido", "ido_Latn", "ike_Latn", "ile_Latn", "ilo", "ina_Latn", "ind", "isl", "ita", "izh", "jav", "jav_Java", "jbo", "jbo_Cyrl", "jbo_Latn", "jdt_Cyrl", "jpn", "kab", "kal", "kan", "kat", "kaz_Cyrl", "kaz_Latn", "kek_Latn", "kha", "khm", "khm_Latn", "kin", "kir_Cyrl", "kjh", "kpv", "krl", "ksh", "kum", "kur_Arab", "kur_Latn", "lad", "lad_Latn", "lao", "lat_Latn", "lav", "ldn_Latn", "lfn_Cyrl", "lfn_Latn", "lij", "lin", "lit", "liv_Latn", "lkt", 
"lld_Latn", "lmo", "ltg", "ltz", "lug", "lzh", "lzh_Hans", "mad", "mah", "mai", "mal", "mar", "max_Latn", "mdf", "mfe", "mhr", "mic", "min", "mkd", "mlg", "mlt", "mnw", "moh", "mon", "mri", "mwl", "mww", "mya", "myv", "nan", "nau", "nav", "nds", "niu", "nld", "nno", "nob", "nob_Hebr", "nog", "non_Latn", "nov_Latn", "npi", "nya", "oci", "ori", "orv_Cyrl", "oss", "ota_Arab", "ota_Latn", "pag", "pan_Guru", "pap", "pau", "pdc", "pes", "pes_Latn", "pes_Thaa", "pms", "pnb", "pol", "por", "ppl_Latn", "prg_Latn", "pus", "quc", "qya", "qya_Latn", "rap", "rif_Latn", "roh", "rom", "ron", "rue", "run", "rus", "sag", "sah", "san_Deva", "scn", "sco", "sgs", "shs_Latn", "shy_Latn", "sin", "sjn_Latn", "slv", "sma", "sme", "smo", "sna", "snd_Arab", "som", "spa", "sqi", "srp_Cyrl", "srp_Latn", "stq", "sun", "swe", "swg", "swh", "tah", "tam", "tat", "tat_Arab", "tat_Latn", "tel", "tet", "tgk_Cyrl", "tha", "tir", "tlh_Latn", "tly_Latn", "tmw_Latn", "toi_Latn", "ton", "tpw_Latn", "tso", "tuk", "tuk_Latn", "tur", "tvl", "tyv", "tzl", "tzl_Latn", "udm", "uig_Arab", "uig_Cyrl", "ukr", "umb", "urd", "uzb_Cyrl", "uzb_Latn", "vec", "vie", "vie_Hani", "vol_Latn", "vro", "war", "wln", "wol", "wuu", "xal", "xho", "yid", "yor", "yue", "yue_Hans", "yue_Hant", "zho", "zho_Hans", "zho_Hant", "zlm_Latn", "zsm_Latn", "zul", "zza", }, ), "nic": ( "Niger-Kordofanian languages", { "bam_Latn", "ewe", "fuc", "fuv", "ibo", "kin", "lin", "lug", "nya", "run", "sag", "sna", "swh", "toi_Latn", "tso", "umb", "wol", "xho", "yor", "zul", }, ), "nld": ("Dutch", {"nld"}), "nor": ("Norwegian", {"nob", "nno"}), "phi": ("Philippine languages", {"ilo", "akl_Latn", "war", "hil", "pag", "ceb"}), "pol": ("Polish", {"pol"}), "por": ("Portuguese", {"por"}), "pqe": ( "Eastern Malayo-Polynesian languages", {"fij", "gil", "haw", "mah", "mri", "nau", "niu", "rap", "smo", "tah", "ton", "tvl"}, ), "roa": ( "Romance languages", { "arg", "ast", "cat", "cos", "egl", "ext", "fra", "frm_Latn", "gcf_Latn", "glg", "hat", "ind", "ita", "lad", "lad_Latn", "lij", "lld_Latn", "lmo", "max_Latn", "mfe", "min", "mwl", "oci", "pap", "pms", "por", "roh", "ron", "scn", "spa", "tmw_Latn", "vec", "wln", "zlm_Latn", "zsm_Latn", }, ), "ron": ("Romanian", {"ron"}), "run": ("Rundi", {"run"}), "rus": ("Russian", {"rus"}), "sal": ("Salishan languages", {"shs_Latn"}), "sem": ("Semitic languages", {"acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "heb", "mlt", "tir"}), "sla": ( "Slavic languages", { "bel", "bel_Latn", "bos_Latn", "bul", "bul_Latn", "ces", "csb_Latn", "dsb", "hrv", "hsb", "mkd", "orv_Cyrl", "pol", "rue", "rus", "slv", "srp_Cyrl", "srp_Latn", "ukr", }, ), "slv": ("Slovenian", {"slv"}), "spa": ("Spanish", {"spa"}), "swe": ("Swedish", {"swe"}), "taw": ("Tai", {"lao", "tha"}), "tgl": ("Tagalog", {"tgl_Latn"}), "tha": ("Thai", {"tha"}), "trk": ( "Turkic languages", { "aze_Latn", "bak", "chv", "crh", "crh_Latn", "kaz_Cyrl", "kaz_Latn", "kir_Cyrl", "kjh", "kum", "ota_Arab", "ota_Latn", "sah", "tat", "tat_Arab", "tat_Latn", "tuk", "tuk_Latn", "tur", "tyv", "uig_Arab", "uig_Cyrl", "uzb_Cyrl", "uzb_Latn", }, ), "tur": ("Turkish", {"tur"}), "ukr": ("Ukrainian", {"ukr"}), "urd": ("Urdu", {"urd"}), "urj": ( "Uralic languages", { "est", "fin", "fkv_Latn", "hun", "izh", "kpv", "krl", "liv_Latn", "mdf", "mhr", "myv", "sma", "sme", "udm", "vep", "vro", }, ), "vie": ("Vietnamese", {"vie", "vie_Hani"}), "war": ("Waray (Philippines)", {"war"}), "zho": ( "Chinese", { "cjy_Hans", "cjy_Hant", "cmn", "cmn_Bopo", "cmn_Hang", "cmn_Hani", "cmn_Hans", "cmn_Hant", "cmn_Hira", 
"cmn_Kana", "cmn_Latn", "cmn_Yiii", "gan", "hak_Hani", "lzh", "lzh_Bopo", "lzh_Hang", "lzh_Hani", "lzh_Hans", "lzh_Hira", "lzh_Kana", "lzh_Yiii", "nan", "nan_Hani", "wuu", "wuu_Bopo", "wuu_Hani", "wuu_Latn", "yue", "yue_Bopo", "yue_Hang", "yue_Hani", "yue_Hans", "yue_Hant", "yue_Hira", "yue_Kana", "zho", "zho_Hans", "zho_Hant", }, ), "zle": ("East Slavic languages", {"bel", "orv_Cyrl", "bel_Latn", "rus", "ukr", "rue"}), "zls": ("South Slavic languages", {"bos_Latn", "bul", "bul_Latn", "hrv", "mkd", "slv", "srp_Cyrl", "srp_Latn"}), "zlw": ("West Slavic languages", {"csb_Latn", "dsb", "hsb", "pol", "ces"}), } def l2front_matter(langs): return "".join(f"- {l}\n" for l in langs) def dedup(lst): """Preservers order""" new_lst = [] for item in lst: if not item or item in new_lst: continue else: new_lst.append(item) return new_lst if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-m", "--models", action="append", help="<Required> Set flag", required=True, nargs="+", dest="models" ) parser.add_argument("-save_dir", "--save_dir", default="marian_converted", help="where to save converted models") args = parser.parse_args() resolver = TatoebaConverter(save_dir=args.save_dir) resolver.convert_models(args.models[0])
transformers/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py", "repo_id": "transformers", "token_count": 22808 }
338
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch MBART model.""" import copy import math from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from .configuration_mbart import MBartConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25" _CONFIG_FOR_DOC = "MBartConfig" # Base model docstring _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/mbart-large-cc25", # See all MBART models at https://huggingface.co/models?filter=mbart ] # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int): """ Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not have a single `decoder_start_token_id` in contrast to other Bart-like models. 
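    A hedged, illustrative trace (the concrete ids are assumptions for illustration: 2 = `</s>`,
    250004 = an `<LID>` language id, 1 = `<pad>`):

        shift_tokens_right(torch.tensor([[11, 12, 13, 2, 250004]]), pad_token_id=1)
        # -> tensor([[250004, 11, 12, 13, 2]])

    i.e. the last non-pad token (the language id) is wrapped around to the front and every
    other token is shifted one position to the right.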
""" prev_output_tokens = input_ids.clone() if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id) index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone() prev_output_tokens[:, 0] = decoder_start_tokens return prev_output_tokens # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MBart class MBartLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): """`input_ids' shape is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ).expand(bsz, -1) return super().forward(positions + self.offset) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MBart class MBartAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[MBartConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->MBart class MBartFlashAttention2(MBartAttention): """ MBart flash attention module. This module inherits from `MBartAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. 
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # MBartFlashAttention2 attention does not support output_attentions if output_attentions: raise ValueError("MBartFlashAttention2 attention does not support output_attentions") # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, q_len, _ = hidden_states.size() # get query proj query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: # cross_attentions key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: # self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. 
Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout ) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. 
attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) MBART_ATTENTION_CLASSES = { "eager": MBartAttention, "flash_attention_2": MBartFlashAttention2, } class MBartEncoderLayer(nn.Module): def __init__(self, config: MBartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = MBART_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class MBartDecoderLayer(nn.Module): def __init__(self, config: MBartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = MBART_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = MBART_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. 
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MBart class MBartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class MBartPreTrainedModel(PreTrainedModel): config_class = MBartConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["MBartDecoderLayer", "MBartAttention"] _supports_flash_attn_2 = True def _init_weights(self, module): std = 
self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs MBART_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MBartConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MBART_GENERATION_EXAMPLE = r""" Translation example: ```python >>> from transformers import AutoTokenizer, MBartForConditionalGeneration >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro") >>> example_english_phrase = "42 is the answer" >>> inputs = tokenizer(example_english_phrase, return_tensors="pt") >>> # Translate >>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] '42 este răspuns' ``` Mask filling example: ```python >>> from transformers import AutoTokenizer, MBartForConditionalGeneration >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") >>> # de_DE is the language symbol id <LID> for German >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE" >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt")["input_ids"] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() ['nett', 'sehr', 'ganz', 'nicht', 'so'] ``` """ MBART_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class MBartEncoder(MBartPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`MBartEncoderLayer`]. Args: config: MBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = MBartLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.encoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_embedding = nn.LayerNorm(embed_dim) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _backward_compatibility_gradient_checkpointing(self): # Override to not delete the attribute from the config if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): self.gradient_checkpointing_enable() def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device) hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class MBartDecoder(MBartPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MBartDecoderLayer`] Args: config: MBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = MBartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([MBartDecoderLayer(config) for _ in range(config.decoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_embedding = nn.LayerNorm(config.d_model) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self._use_flash_attention_2: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions.to(inputs_embeds.device) hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {attn_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare MBART Model outputting raw hidden-states without any specific head on top.", MBART_START_DOCSTRING, ) class MBartModel(MBartPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: MBartConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = MBartEncoder(config, self.shared) self.decoder = MBartDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.get_input_embeddings()) self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings()) @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: 
Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqModelOutput, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # different to other models, MBart automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The MBART Model with a language modeling head. 
Can be used for summarization, after fine-tuning the pretrained models.", MBART_START_DOCSTRING, ) class MBartForConditionalGeneration(MBartPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: MBartConfig): super().__init__(config) self.model = MBartModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(MBART_GENERATION_EXAMPLE) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MBART_START_DOCSTRING, ) class MBartForSequenceClassification(MBartPreTrainedModel): _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight"] def __init__(self, config: MBartConfig, **kwargs): super().__init__(config, **kwargs) self.model = MBartModel(config) self.classification_head = MBartClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MBART_START_DOCSTRING, ) class MBartForQuestionAnswering(MBartPreTrainedModel): _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = MBartModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward def forward( self, input_ids: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->MBart class MBartDecoderWrapper(MBartPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = MBartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart, facebook/bart-base->facebook/mbart-large-cc25 class MBartForCausalLM(MBartPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = MBartDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
                of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MBartForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
        >>> model = MBartForCausalLM.from_pretrained("facebook/mbart-large-cc25", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
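# Illustrative usage sketch: a minimal forward pass through
# `MBartForConditionalGeneration` with a tiny, randomly initialized
# `MBartConfig`, so no pretrained checkpoint is needed. The small config values
# below are assumptions chosen only to keep the example fast, and the snippet
# is meant to be run as a separate script with `transformers` installed (this
# module uses relative imports and is not intended to be executed directly).
#
#     import torch
#     from transformers import MBartConfig, MBartForConditionalGeneration
#
#     config = MBartConfig(
#         vocab_size=128, d_model=16, encoder_layers=2, decoder_layers=2,
#         encoder_attention_heads=2, decoder_attention_heads=2,
#         encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=64,
#     )
#     model = MBartForConditionalGeneration(config)
#
#     # Passing `labels` without `decoder_input_ids` exercises the MBart-specific
#     # `shift_tokens_right` path above, which builds the decoder inputs from the labels.
#     input_ids = torch.randint(3, config.vocab_size, (2, 10))
#     labels = torch.randint(3, config.vocab_size, (2, 10))
#     outputs = model(input_ids=input_ids, labels=labels)
#     print(outputs.loss, outputs.logits.shape)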
transformers/src/transformers/models/mbart/modeling_mbart.py/0
{ "file_path": "transformers/src/transformers/models/mbart/modeling_mbart.py", "repo_id": "transformers", "token_count": 44217 }
339
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import re import sys import types import torch from transformers import AutoTokenizer, GPT2Config from transformers.modeling_utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME, shard_checkpoint def add_checkpointing_args(parser): parser.add_argument("--megatron-path", type=str, default=None, help="Base directory of Megatron repository") parser.add_argument( "--convert_checkpoint_from_megatron_to_transformers", action="store_true", help=( "If True, convert a Megatron checkpoint to a Transformers checkpoint. " "If False, convert a Transformers checkpoint to a Megatron checkpoint." ), ) parser.add_argument( "--load_path", type=str, required=True, help="Path to the checkpoint to convert.", ) parser.add_argument( "--save_path", type=str, required=True, help="Path to the converted checkpoint.", ) parser.add_argument("--print-checkpoint-structure", action="store_true") return parser def add_megatron_checkpoint_args(parser): parser.add_argument( "--target_tensor_model_parallel_size", type=int, default=1, help=( "The tensor model parallel size of the converted checkpoint. " "Only used when converting a Transformers checkpoint to a Megatron checkpoint." ), ) parser.add_argument( "--target_pipeline_model_parallel_size", type=int, default=1, help=( "The pipeline model parallel size of the converted checkpoint. " "Only used when converting a Transformers checkpoint to a Megatron checkpoint." ), ) parser.add_argument( "--target_data_parallel_size", type=int, default=1, help=( "The data parallel size of the converted checkpoint. " "Only used when converting a Transformers checkpoint to a Megatron checkpoint." ), ) parser.add_argument( "--target_params_dtype", type=str, default="fp32", help=( "The dtype of the converted checkpoint. " "Only used when converting a Transformers checkpoint to a Megatron checkpoint." ), ) parser.add_argument( "--make_vocab_size_divisible_by", type=int, default=128, help=( "Pad the vocab size to be divisible by this value. " "This is added for computational efficieny reasons. " "Only used when converting a Transformers checkpoint to a Megatron checkpoint." ), ) parser.add_argument( "--use_distributed_optimizer", action="store_true", help=( "If True, use the distributed optimizer. " "Only used when converting a Transformers checkpoint to a Megatron checkpoint." ), ) return parser def add_transformers_checkpoint_args(parser): parser.add_argument( "--tokenizer_name", type=str, default=None, help=( "The name of the pre-trained tokenizer to save. " "If not None, the tokenizer will be saved. " "Only used when converting a Megatron checkpoint to a Transformers checkpoint." ), ) parser.add_argument( "--max_shard_size", type=str, default="10GB", help=( "The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size " "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). 
" "Only used when converting a Megatron checkpoint to a Transformers checkpoint." ), ) return parser # The simple map of names for "automated" rules. megatron_to_transformers = { "attention.dense": ".attn.c_proj.", "self_attention.dense": ".attn.c_proj.", "mlp.dense_h_to_4h": ".mlp.c_fc.", "mlp.dense_4h_to_h": ".mlp.c_proj.", } transformers_to_megatron = {v[1:-1]: k for k, v in megatron_to_transformers.items()} tensor_parallel_params = [ # megatron-lm layers to merge across tp ranks "self_attention.query_key_value.weight", "self_attention.query_key_value.bias", "self_attention.dense.weight", "mlp.dense_h_to_4h.weight", "mlp.dense_h_to_4h.bias", "mlp.dense_4h_to_h.weight", # deprecated "attention.query_key_value.weight", "attention.query_key_value.bias", "attention.dense.weight", # transformers layers to split across tp ranks "attn.c_attn.weight", "attn.c_attn.bias", "attn.c_proj.weight", "mlp.c_fc.weight", "mlp.c_fc.bias", "mlp.c_proj.weight", ] def recursive_print(name, val, spaces=0): """ Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py` Args: name (str): the name of the current tensor parameter val (Tuple(int)): the shape of the current tensor parameter spaces (int): the number of spaces to print before the output for a nested structure """ # Format the message. if name is None: msg = None else: fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}" msg = fmt.format(name) # Print and recurse (if needed). if isinstance(val, dict): if msg is not None: print(msg) for k in val.keys(): recursive_print(k, val[k], spaces + 2) elif isinstance(val, torch.Tensor): print(msg, ":", val.size()) else: print(msg, ":", val) def megatron_to_transformers_fix_query_key_value_ordering( param, checkpoint_version, num_splits, num_heads, hidden_size ): """ Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] for compatibility with later versions of NVIDIA Megatron-LM. The inverse operation is performed inside Megatron-LM to read checkpoints: https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 If param is the weight tensor of the self-attention block, the returned tensor will have to be transposed one more time to be read by HuggingFace GPT2. This function is taken from `convert_megatron_gpt2_checkpoint.py` Args: param (torch.Tensor): the tensor to permute checkpoint_version (int): the version of the checkpoint. num_splits (int): the number of projections, usually 3 for (Query, Key, Value) num_heads (int): the number of attention heads hidden_size (int): the hidden size per head """ input_shape = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 2) param = param.transpose(1, 2).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 1).contiguous() param = param.view(*input_shape) return param def transformers_to_megatron_fix_query_key_value_ordering( param, checkpoint_version, num_splits, num_heads, hidden_size ): """ Permutes layout of param tensor to the one compatible with respective NVIDIA Megatron-LM chekpoint versions. 
Input is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the self-attention block, the param needs to be already transposed before calling this function. Args: param (torch.Tensor): the tensor to permute checkpoint_version (int): the version of the checkpoint. num_splits (int): the number of projections, usually 3 for (Query, Key, Value) num_heads (int): the number of attention heads hidden_size (int): the hidden size per head """ # Input is [num_splits * num_heads * hidden_size, :] input_shape = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:] param = param.view(*current_shape) param = param.transpose(0, 2) param = param.transpose(1, 2).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:] param = param.view(*current_shape) param = param.transpose(0, 1).contiguous() param = param.view(*input_shape) return param def merge_transformers_sharded_states(path, num_checkpoints): """ Merge sharded checkpoints from transformers into a single checkpoint. Args: path (str): the path to the sharded checkpoints num_checkpoints (int): the number of checkpoints to merge """ state_dict = {} for i in range(1, num_checkpoints + 1): checkpoint_path = os.path.join(path, f"pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin") current_chunk = torch.load(checkpoint_path, map_location="cpu") state_dict.update(current_chunk) return state_dict def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank): """ Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline parallel size and pipeline parallel rank. Args: args (argparse.Namespace): the arguments to the script tp_size (int): the tensor parallel size pp_size (int): the pipeline parallel size pp_rank (int): the pipeline parallel rank """ tp_state_dicts = [] for i in range(tp_size): sub_dir_name = f"mp_rank_{i:02d}" if pp_size == 1 else f"mp_rank_{i:02d}_{pp_rank:03d}" for checkpoint_name in ["model_optim_rng.pt", "model_rng.pt"]: checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name) if os.path.isfile(checkpoint_path): break state_dict = torch.load(checkpoint_path, map_location="cpu") tp_state_dicts.append(state_dict) return tp_state_dicts def get_element_from_dict_by_path(d, path): """ Get element from dictionary by path. If element is not present, recursively add empty dictionaries. Args: d (dict): the dictionary to get the element from path (list): the path to the element which is delimited by "." """ path = path.split(".") for k in path: if k not in d: d[k] = {} d = d[k] return d def convert_checkpoint_from_megatron_to_transformers(args): """ Convert NVIDIA Megatron-LM checkpoint to HuggingFace Transformers checkpoint. This handles Megatron checkpoints with different tensor parallelism and pipeline parallelism sizes. It saves the converted checkpoint into shards using HuggingFace Transformers checkpoint sharding functionality. 
This greatly extends the functionality of `convert_megatron_gpt2_checkpoint.py` Args: args (argparse.Namespace): the arguments to the script """ # Load Megatron-LM checkpoint arguments from the state dict sub_dirs = os.listdir(args.load_path) possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"] for sub_dir in possible_sub_dirs: if sub_dir in sub_dirs: rank0_checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir))[0] rank0_checkpoint_path = os.path.join(args.load_path, sub_dir, rank0_checkpoint_name) break print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}") state_dict = torch.load(rank0_checkpoint_path, map_location="cpu") megatron_args = state_dict.get("args", None) if megatron_args is None: raise ValueError( "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints" " containing all the megatron arguments. This is because it loads all config related to model" " architecture, the tensor and pipeline model parallel size from the checkpoint insead of user having to" " manually specify all the details. Please save Megatron-LM checkpoint along with all the megatron" " arguments to use this utility." ) # Create Transformers GPT2 config from Megatron-LM arguments if megatron_args is not None: if megatron_args.bias_gelu_fusion: activation_function = "gelu_fast" elif megatron_args.openai_gelu: activation_function = "gelu_new" else: activation_function = "gelu" else: # in the very early days this used to be "gelu_new" activation_function = "gelu_new" vocab_size = ( megatron_args.padded_vocab_size if getattr(megatron_args, "orig_vocab_size", None) is None else megatron_args.orig_vocab_size ) print(vocab_size) config = GPT2Config( vocab_size=vocab_size, n_positions=megatron_args.max_position_embeddings, n_embd=megatron_args.hidden_size, n_layer=megatron_args.num_layers, n_head=megatron_args.num_attention_heads, n_inner=megatron_args.ffn_hidden_size, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=vocab_size - 1, eos_token_id=vocab_size - 1, architectures=["GPT2LMHeadModel"], ) output_state_dict = {} checkpoint_version = state_dict.get("checkpoint_version", 0.0) tp_size = megatron_args.tensor_model_parallel_size pp_size = megatron_args.pipeline_model_parallel_size dtype = torch.float32 # The regex to extract layer names. layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # Convert. print("Converting") # Embeddings print("Converting embeddings") tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, 0) # Convert and store the position embeddings. position_embeddings = get_element_from_dict_by_path( tp_state_dicts[0], "model.language_model.embedding.position_embeddings.weight" ) output_state_dict["transformer.wpe.weight"] = position_embeddings.to(dtype) # Convert and store the word embeddings. word_embeddings = torch.cat( [ get_element_from_dict_by_path( tp_state_dicts[tp_rank], "model.language_model.embedding.word_embeddings.weight" ) for tp_rank in range(tp_size) ], dim=0, ) word_embeddings = word_embeddings[:vocab_size].to(dtype) output_state_dict["transformer.wte.weight"] = word_embeddings # Transformer Layers print("Converting transformer layers") # The number of heads. 
heads = config.n_head # The hidden_size per head. hidden_size_per_head = config.n_embd // config.n_head n_positions = config.n_positions num_layers = config.num_hidden_layers // pp_size for pp_rank in range(pp_size): if pp_size > 0: print(f"Converting pipeline parallel rank {pp_rank}") tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, pp_rank) # The transformer. path = ( "model.language_model.transformer" if "transformer" in get_element_from_dict_by_path(tp_state_dicts[0], "model.language_model").keys() else "model.language_model.encoder" ) # Extract the layers. for key, val in get_element_from_dict_by_path(tp_state_dicts[0], path).items(): # Match the name. m = layer_re.match(key) # Stop if that's not a layer if m is None: break # The index of the layer. layer_idx = int(m.group(1)) + pp_rank * num_layers # The name of the operation. op_name = m.group(2) # Is it a weight or a bias? weight_or_bias = m.group(3) # The name of the layer. layer_name = f"transformer.h.{layer_idx}" if op_name + "." + weight_or_bias not in tensor_parallel_params: params = val.to(dtype) else: dim = 1 if op_name in ["self_attention.dense", "mlp.dense_4h_to_h", "attention.dense"] else 0 params = torch.cat( [val] + [ get_element_from_dict_by_path(tp_state_dicts[tp_rank], f"{path}")[key] for tp_rank in range(1, tp_size) ], dim=dim, ).to(dtype) # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm"): ln_name = "ln_1" if op_name.startswith("input") else "ln_2" output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=dtype)).view( 1, 1, n_positions, n_positions ) output_state_dict[layer_name + ".attn.bias"] = causal_mask # Insert a "dummy" tensor for masked_bias. masked_bias = torch.tensor(-1e4, dtype=dtype) output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias out_val = megatron_to_transformers_fix_query_key_value_ordering( params, checkpoint_version, 3, heads, hidden_size_per_head, ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. out_val = out_val.transpose(0, 1).contiguous() # Store. output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": out_val = megatron_to_transformers_fix_query_key_value_ordering( params, checkpoint_version, 3, heads, hidden_size_per_head ) # Store. No change of shape. output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val # Transpose the weights. elif weight_or_bias == "weight": out_name = megatron_to_transformers[op_name] output_state_dict[layer_name + out_name + "weight"] = params.transpose(0, 1) # Copy the bias. elif weight_or_bias == "bias": out_name = megatron_to_transformers[op_name] output_state_dict[layer_name + out_name + "bias"] = params if config.n_layer != (layer_idx + 1): raise ValueError(f"Expected {config.n_layer} layers but found {layer_idx + 1}") # The final layernorm. 
print("Converting final layernorm") params = get_element_from_dict_by_path(tp_state_dicts[0], str(path)) output_state_dict["transformer.ln_f.weight"] = params["final_layernorm.weight"].to(dtype) output_state_dict["transformer.ln_f.bias"] = params["final_layernorm.bias"].to(dtype) # For LM head, transformers' wants the matrix to weight embeddings. print("Converting LM head") output_state_dict["lm_head.weight"] = word_embeddings.to(dtype) # It should be done! print("Conversion from Megatron-LM to Transformers is done!") # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(None, output_state_dict) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if args.tokenizer_name is None: tokenizer_name = "gpt2" else: tokenizer_name = args.tokenizer_name tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) tokenizer_class = type(tokenizer).__name__ config.tokenizer_class = tokenizer_class # Store the config to file. print("Saving config") config.save_pretrained(args.save_path) # Save tokenizer based on args if args.tokenizer_name is not None: print(f"Adding {tokenizer_class} tokenizer files") tokenizer.save_pretrained(args.save_path) # Store the state_dict to file. max_shard_size = int(args.max_shard_size) if args.max_shard_size.isdigit() else args.max_shard_size shards, index = shard_checkpoint(output_state_dict, max_shard_size=max_shard_size) # Save the model for shard_file, shard in shards.items(): torch.save(shard, os.path.join(args.save_path, shard_file)) if index is None: print(f"Model weights saved in {os.path.join(args.save_path, WEIGHTS_NAME)}") else: save_index_file = os.path.join(args.save_path, WEIGHTS_INDEX_NAME) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) print( f"The model is bigger than the maximum size per checkpoint ({args.max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) def convert_checkpoint_from_transformers_to_megatron(args): """ Convert a checkpoint from HuggingFace Transformers to Megatron-LM. This allows converted checkpoints with variable tensor parallelism and pipeline parallelism sizes. It takes as input a checkpoint from HuggingFace Transformers which can have multiple shards. Args: args (argparse.Namespace): the arguments to the script """ os.makedirs(args.save_path, exist_ok=True) # Search in directory above this sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) if args.megatron_path is not None: sys.path.insert(0, args.megatron_path) try: from megatron.tokenizer.tokenizer import _vocab_size_with_padding except ModuleNotFoundError: print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. 
Exiting.") exit(1) # load the transformers model state dict and config sub_dirs = [x for x in os.listdir(args.load_path) if x.startswith("pytorch_model")] if len(sub_dirs) == 1: checkpoint_name = "pytorch_model.bin" state_dict = torch.load(os.path.join(args.load_path, checkpoint_name), map_location="cpu") else: num_checkpoints = len(sub_dirs) - 1 state_dict = merge_transformers_sharded_states(args.load_path, num_checkpoints) config = GPT2Config.from_pretrained(args.load_path) # Saving the tracker file tracker_filepath = os.path.join(args.save_path, "latest_checkpointed_iteration.txt") with open(tracker_filepath, "w") as f: f.write("release") # create `release` dir in args.load_path release_dir = os.path.join(args.save_path, "release") os.makedirs(release_dir, exist_ok=True) # megatron args megatron_args = { "orig_vocab_size": config.vocab_size, "max_position_embeddings": config.n_positions, "hidden_size": config.n_embd, "num_layers": config.n_layer, "num_attention_heads": config.n_head, "ffn_hidden_size": config.n_inner, "tensor_model_parallel_size": args.target_tensor_model_parallel_size, "pipeline_model_parallel_size": args.target_pipeline_model_parallel_size, "data_parallel_size": args.target_data_parallel_size, "make_vocab_size_divisible_by": args.make_vocab_size_divisible_by, "rank": 0, "tokenizer_type": "GPT2BPETokenizer", } if config.activation_function == "gelu": megatron_args["bias_gelu_fusion"] = False megatron_args["openai_gelu"] = False elif config.activation_function == "gelu_fast": megatron_args["bias_gelu_fusion"] = True megatron_args["openai_gelu"] = False elif config.activation_function == "gelu_new": megatron_args["bias_gelu_fusion"] = False megatron_args["openai_gelu"] = True margs = types.SimpleNamespace() for k, v in megatron_args.items(): setattr(margs, k, v) # params dtype if args.target_params_dtype == "fp16": dtype = torch.float16 elif args.target_params_dtype == "bf16": dtype = torch.bfloat16 else: dtype = torch.float32 setattr(margs, "params_dtype", dtype) # save dummy optim state dict dummy_optim_state_dict = {} dummy_optim_state_dict["optimizer"] = { "step": 0, "param_groups": [ { "lr": 0.0, "beta1": 0.0, "beta2": 0.0, "eps": 0.0, "weight_decay": 0.0, "correct_bias": False, "params": [], } ], } if args.use_distributed_optimizer: for i in range(args.target_pipeline_model_parallel_size): for j in range(args.target_tensor_model_parallel_size): for k in range(args.target_data_parallel_size): if args.target_pipeline_model_parallel_size == 1: checkpoint_dir = f"mp_rank_{j:02d}_{k:03d}" else: checkpoint_dir = f"mp_rank_{j:02d}_{i:03d}_{k:03d}" checkpoint_dir = os.path.join(release_dir, checkpoint_dir) os.makedirs(checkpoint_dir, exist_ok=True) torch.save( dummy_optim_state_dict, os.path.join(checkpoint_dir, "optim.pt"), ) # Convert. 
print("Converting") output_state_dict = [] for i in range(args.target_tensor_model_parallel_size): output_state_dict.append({}) # Embedding layer print("converting embedding layer") pos_embedding = state_dict["transformer.wpe.weight"].to(dtype) word_embedding = state_dict["transformer.wte.weight"].to(dtype) orig_vocab_size = config.vocab_size padded_vocab_size = _vocab_size_with_padding(orig_vocab_size, margs) setattr(margs, "padded_vocab_size", padded_vocab_size) # Cut out extra padding we don't need if orig_vocab_size > padded_vocab_size: full_word_embed = word_embedding[0:padded_vocab_size, :] # Expanding embedding to larger size by replicating final entry elif orig_vocab_size < padded_vocab_size: padding_size = padded_vocab_size - orig_vocab_size full_word_embed = torch.cat((word_embedding, word_embedding[-1].unsqueeze(0).expand(padding_size, -1))) # Same size! else: full_word_embed = word_embedding # Split into new tensor model parallel sizes out_word_embed = torch.chunk(full_word_embed, args.target_tensor_model_parallel_size, dim=0) for i in range(args.target_tensor_model_parallel_size): pos_emb_dict = get_element_from_dict_by_path( output_state_dict[i], "model.language_model.embedding.position_embeddings" ) pos_emb_dict["weight"] = pos_embedding word_emb_dict = get_element_from_dict_by_path( output_state_dict[i], "model.language_model.embedding.word_embeddings" ) word_emb_dict["weight"] = out_word_embed[i].clone() # Transformer layers print("converting transformer layers") if config.num_attention_heads % args.target_tensor_model_parallel_size != 0: raise ValueError( f"Number of attention heads ({config.num_attention_heads}) must be divisible by number of tensor parallelism" f" ({args.target_tensor_model_parallel_size})" ) if config.num_hidden_layers % args.target_pipeline_model_parallel_size != 0: raise ValueError( f"Number of layers ({config.num_hidden_layers}) must be divisible by number of pipeline parallelism" f" ({args.target_pipeline_model_parallel_size})" ) num_layers = config.num_hidden_layers // args.target_pipeline_model_parallel_size layer_re = re.compile(r"transformer.h\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # The number of heads. heads = config.n_head # The hidden_size per head. hidden_size_per_head = config.n_embd // config.n_head for pp_rank in range(args.target_pipeline_model_parallel_size): layer_offset = pp_rank * num_layers if pp_rank > 0: output_state_dict = [] for i in range(args.target_tensor_model_parallel_size): output_state_dict.append({}) for layer in range(num_layers): pp_layer_id = layer + layer_offset layers_to_copy = [ layer_name for layer_name in state_dict.keys() if layer_name.startswith(f"transformer.h.{pp_layer_id}.") ] for layer_name in layers_to_copy: m = layer_re.match(layer_name) # Stop if that's not a layer if m is None: break # The index of the layer. _ = int(m.group(1)) # The name of the operation. op_name = m.group(2) # Is it a weight or a bias? weight_or_bias = m.group(3) params = state_dict[layer_name].to(dtype) # handle layernorm if op_name.startswith("ln"): out_name = "input_layernorm" if op_name.endswith("1") else "post_attention_layernorm" layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}" # handle attention K, V, Q weights elif op_name.startswith("attn.c_attn") and weight_or_bias == "weight": # transformers stores D X (3*D) but Megatron-LM expects (3*D) X D. 
params = params.transpose(0, 1).contiguous() params = transformers_to_megatron_fix_query_key_value_ordering( params, 3.0, 3, heads, hidden_size_per_head, ) layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}" # handle attention K, V, Q bias elif op_name.startswith("attn.c_attn") and weight_or_bias == "bias": params = transformers_to_megatron_fix_query_key_value_ordering( params, 3.0, 3, heads, hidden_size_per_head, ) layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}" # handle attention and mlp weights elif weight_or_bias == "weight": out_name = transformers_to_megatron.get(op_name, None) if out_name is None: continue params = params.transpose(0, 1) layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}" # handle attention and mlp bias elif weight_or_bias == "bias": out_name = transformers_to_megatron.get(op_name, None) if out_name is None: continue layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}" # skip else: continue if op_name + "." + weight_or_bias in tensor_parallel_params: dim = 1 if op_name in ["attn.c_proj", "mlp.c_proj"] else 0 params = torch.chunk(params, args.target_tensor_model_parallel_size, dim=dim) for i in range(args.target_tensor_model_parallel_size): params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder") params_dict[layer_name] = ( params[i].clone() if (op_name + "." + weight_or_bias in tensor_parallel_params) else params ) if pp_rank == args.target_pipeline_model_parallel_size - 1: # handle final layernorm for weight_or_bias in ["weight", "bias"]: params = state_dict[f"transformer.ln_f.{weight_or_bias}"].to(dtype) layer_name = f"final_layernorm.{weight_or_bias}" for i in range(args.target_tensor_model_parallel_size): params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder") params_dict[layer_name] = params # add the LM head for i in range(args.target_tensor_model_parallel_size): params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.word_embeddings_for_head") params_dict["weight"] = out_word_embed[i].clone() # saving the state dict as per the tp_rank and pp_rank for tp_rank in range(args.target_tensor_model_parallel_size): output_state_dict[tp_rank]["checkpoint_version"] = 3.0 output_state_dict[tp_rank]["args"] = margs checkpoint_dir = ( f"mp_rank_{tp_rank:02d}" if args.target_pipeline_model_parallel_size == 1 else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}" ) if args.use_distributed_optimizer: checkpoint_name = "model_rng.pt" else: checkpoint_name = "model_optim_rng.pt" output_state_dict[tp_rank]["optimizer"] = dummy_optim_state_dict["optimizer"] checkpoint_dir = os.path.join(release_dir, checkpoint_dir) os.makedirs(checkpoint_dir, exist_ok=True) checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name) if args.print_checkpoint_structure: print( f"Checkpoint structure of model state dict shard belonging to TP rank {tp_rank} and PP rank" f" {pp_rank}:" ) recursive_print(None, output_state_dict[tp_rank]) torch.save(output_state_dict[tp_rank], checkpoint_path) def main(): parser = argparse.ArgumentParser() parser = add_checkpointing_args(parser) parser = add_megatron_checkpoint_args(parser) parser = add_transformers_checkpoint_args(parser) args = parser.parse_args() if args.convert_checkpoint_from_megatron_to_transformers: convert_checkpoint_from_megatron_to_transformers(args) else: convert_checkpoint_from_transformers_to_megatron(args) if __name__ == "__main__": main()
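# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the two
# *_fix_query_key_value_ordering helpers above are exact inverses of each other
# for a given checkpoint version, which is what makes the Megatron <-> Transformers
# round trip lossless for the QKV projections. The uncalled helper below is a
# minimal sanity check using dummy sizes only; it is never invoked by the script.
def _illustrative_qkv_roundtrip_check():
    heads, hidden_size_per_head, cols = 4, 8, 16
    megatron_qkv = torch.randn(heads * 3 * hidden_size_per_head, cols)
    transformers_qkv = megatron_to_transformers_fix_query_key_value_ordering(
        megatron_qkv, checkpoint_version=2.0, num_splits=3, num_heads=heads, hidden_size=hidden_size_per_head
    )
    restored = transformers_to_megatron_fix_query_key_value_ordering(
        transformers_qkv, checkpoint_version=2.0, num_splits=3, num_heads=heads, hidden_size=hidden_size_per_head
    )
    # The permutation is undone exactly, regardless of the dummy sizes chosen above.
    assert torch.equal(restored, megatron_qkv)


# Typical invocations of this script (all paths below are placeholders, not real checkpoints):
#   python checkpoint_reshaping_and_interoperability.py \
#       --convert_checkpoint_from_megatron_to_transformers \
#       --load_path /path/to/megatron/checkpoint --save_path /path/to/transformers/output \
#       --tokenizer_name gpt2 --print-checkpoint-structure
#   python checkpoint_reshaping_and_interoperability.py \
#       --load_path /path/to/transformers/checkpoint --save_path /path/to/megatron/output \
#       --target_tensor_model_parallel_size 2 --target_pipeline_model_parallel_size 1 \
#       --target_params_dtype fp16 --megatron-path /path/to/Megatron-LM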
transformers/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py/0
{ "file_path": "transformers/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py", "repo_id": "transformers", "token_count": 16485 }
340
# coding=utf-8 # Copyright 2023 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE """ PyTorch MobileViTV2 model.""" from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_mobilevitv2 import MobileViTV2Config logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "MobileViTV2Config" # Base docstring _CHECKPOINT_FOR_DOC = "apple/mobilevitv2-1.0-imagenet1k-256" _EXPECTED_OUTPUT_SHAPE = [1, 512, 8, 8] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "apple/mobilevitv2-1.0-imagenet1k-256" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "apple/mobilevitv2-1.0-imagenet1k-256" # See all MobileViTV2 models at https://huggingface.co/models?filter=mobilevitv2 ] # Copied from transformers.models.mobilevit.modeling_mobilevit.make_divisible def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the original TensorFlow repo. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. 
if new_value < 0.9 * value: new_value += divisor return int(new_value) def clip(value: float, min_val: float = float("-inf"), max_val: float = float("inf")) -> float: return max(min_val, min(max_val, value)) # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTConvLayer with MobileViT->MobileViTV2 class MobileViTV2ConvLayer(nn.Module): def __init__( self, config: MobileViTV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, ) -> None: super().__init__() padding = int((kernel_size - 1) / 2) * dilation if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual with MobileViT->MobileViTV2 class MobileViTV2InvertedResidual(nn.Module): """ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 """ def __init__( self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1 ) -> None: super().__init__() expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8) if stride not in [1, 2]: raise ValueError(f"Invalid stride {stride}.") self.use_residual = (stride == 1) and (in_channels == out_channels) self.expand_1x1 = MobileViTV2ConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1 ) self.conv_3x3 = MobileViTV2ConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, ) self.reduce_1x1 = MobileViTV2ConvLayer( config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, ) def forward(self, features: torch.Tensor) -> torch.Tensor: residual = features features = self.expand_1x1(features) features = self.conv_3x3(features) features = self.reduce_1x1(features) return residual + features if self.use_residual else features # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer with MobileViT->MobileViTV2 class MobileViTV2MobileNetLayer(nn.Module): def __init__( self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1 ) -> None: super().__init__() self.layer = nn.ModuleList() for i in range(num_stages): layer = MobileViTV2InvertedResidual( config, 
in_channels=in_channels, out_channels=out_channels, stride=stride if i == 0 else 1, ) self.layer.append(layer) in_channels = out_channels def forward(self, features: torch.Tensor) -> torch.Tensor: for layer_module in self.layer: features = layer_module(features) return features class MobileViTV2LinearSelfAttention(nn.Module): """ This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper: https://arxiv.org/abs/2206.02680 Args: config (`MobileVitv2Config`): Model configuration object embed_dim (`int`): `input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)` """ def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None: super().__init__() self.qkv_proj = MobileViTV2ConvLayer( config=config, in_channels=embed_dim, out_channels=1 + (2 * embed_dim), bias=True, kernel_size=1, use_normalization=False, use_activation=False, ) self.attn_dropout = nn.Dropout(p=config.attn_dropout) self.out_proj = MobileViTV2ConvLayer( config=config, in_channels=embed_dim, out_channels=embed_dim, bias=True, kernel_size=1, use_normalization=False, use_activation=False, ) self.embed_dim = embed_dim def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # (batch_size, embed_dim, num_pixels_in_patch, num_patches) --> (batch_size, 1+2*embed_dim, num_pixels_in_patch, num_patches) qkv = self.qkv_proj(hidden_states) # Project hidden_states into query, key and value # Query --> [batch_size, 1, num_pixels_in_patch, num_patches] # value, key --> [batch_size, embed_dim, num_pixels_in_patch, num_patches] query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1) # apply softmax along num_patches dimension context_scores = torch.nn.functional.softmax(query, dim=-1) context_scores = self.attn_dropout(context_scores) # Compute context vector # [batch_size, embed_dim, num_pixels_in_patch, num_patches] x [batch_size, 1, num_pixels_in_patch, num_patches] -> [batch_size, embed_dim, num_pixels_in_patch, num_patches] context_vector = key * context_scores # [batch_size, embed_dim, num_pixels_in_patch, num_patches] --> [batch_size, embed_dim, num_pixels_in_patch, 1] context_vector = torch.sum(context_vector, dim=-1, keepdim=True) # combine context vector with values # [batch_size, embed_dim, num_pixels_in_patch, num_patches] * [batch_size, embed_dim, num_pixels_in_patch, 1] --> [batch_size, embed_dim, num_pixels_in_patch, num_patches] out = torch.nn.functional.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) return out class MobileViTV2FFN(nn.Module): def __init__( self, config: MobileViTV2Config, embed_dim: int, ffn_latent_dim: int, ffn_dropout: float = 0.0, ) -> None: super().__init__() self.conv1 = MobileViTV2ConvLayer( config=config, in_channels=embed_dim, out_channels=ffn_latent_dim, kernel_size=1, stride=1, bias=True, use_normalization=False, use_activation=True, ) self.dropout1 = nn.Dropout(ffn_dropout) self.conv2 = MobileViTV2ConvLayer( config=config, in_channels=ffn_latent_dim, out_channels=embed_dim, kernel_size=1, stride=1, bias=True, use_normalization=False, use_activation=False, ) self.dropout2 = nn.Dropout(ffn_dropout) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.conv1(hidden_states) hidden_states = self.dropout1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.dropout2(hidden_states) return hidden_states class MobileViTV2TransformerLayer(nn.Module): def __init__( self, config: 
MobileViTV2Config, embed_dim: int, ffn_latent_dim: int, dropout: float = 0.0, ) -> None: super().__init__() self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps) self.attention = MobileViTV2LinearSelfAttention(config, embed_dim) self.dropout1 = nn.Dropout(p=dropout) self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps) self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: layernorm_1_out = self.layernorm_before(hidden_states) attention_output = self.attention(layernorm_1_out) hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.ffn(layer_output) layer_output = layer_output + hidden_states return layer_output class MobileViTV2Transformer(nn.Module): def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None: super().__init__() ffn_multiplier = config.ffn_multiplier ffn_dims = [ffn_multiplier * d_model] * n_layers # ensure that dims are multiple of 16 ffn_dims = [int((d // 16) * 16) for d in ffn_dims] self.layer = nn.ModuleList() for block_idx in range(n_layers): transformer_layer = MobileViTV2TransformerLayer( config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx] ) self.layer.append(transformer_layer) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for layer_module in self.layer: hidden_states = layer_module(hidden_states) return hidden_states class MobileViTV2Layer(nn.Module): """ MobileViTV2 layer: https://arxiv.org/abs/2206.02680 """ def __init__( self, config: MobileViTV2Config, in_channels: int, out_channels: int, attn_unit_dim: int, n_attn_blocks: int = 2, dilation: int = 1, stride: int = 2, ) -> None: super().__init__() self.patch_width = config.patch_size self.patch_height = config.patch_size cnn_out_dim = attn_unit_dim if stride == 2: self.downsampling_layer = MobileViTV2InvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if dilation == 1 else 1, dilation=dilation // 2 if dilation > 1 else 1, ) in_channels = out_channels else: self.downsampling_layer = None # Local representations self.conv_kxk = MobileViTV2ConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size, groups=in_channels, ) self.conv_1x1 = MobileViTV2ConvLayer( config, in_channels=in_channels, out_channels=cnn_out_dim, kernel_size=1, use_normalization=False, use_activation=False, ) # Global representations self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks) # self.layernorm = MobileViTV2LayerNorm2D(attn_unit_dim, eps=config.layer_norm_eps) self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, eps=config.layer_norm_eps) # Fusion self.conv_projection = MobileViTV2ConvLayer( config, in_channels=cnn_out_dim, out_channels=in_channels, kernel_size=1, use_normalization=True, use_activation=False, ) def unfolding(self, feature_map: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]: batch_size, in_channels, img_height, img_width = feature_map.shape patches = nn.functional.unfold( feature_map, kernel_size=(self.patch_height, self.patch_width), stride=(self.patch_height, self.patch_width), ) patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1) return patches, (img_height, img_width) def folding(self, patches: torch.Tensor, output_size: 
Tuple[int, int]) -> torch.Tensor: batch_size, in_dim, patch_size, n_patches = patches.shape patches = patches.reshape(batch_size, in_dim * patch_size, n_patches) feature_map = nn.functional.fold( patches, output_size=output_size, kernel_size=(self.patch_height, self.patch_width), stride=(self.patch_height, self.patch_width), ) return feature_map def forward(self, features: torch.Tensor) -> torch.Tensor: # reduce spatial dimensions if needed if self.downsampling_layer: features = self.downsampling_layer(features) # local representation features = self.conv_kxk(features) features = self.conv_1x1(features) # convert feature map to patches patches, output_size = self.unfolding(features) # learn global representations patches = self.transformer(patches) patches = self.layernorm(patches) # convert patches back to feature maps # [batch_size, patch_height, patch_width, input_dim] --> [batch_size, input_dim, patch_height, patch_width] features = self.folding(patches, output_size) features = self.conv_projection(features) return features class MobileViTV2Encoder(nn.Module): def __init__(self, config: MobileViTV2Config) -> None: super().__init__() self.config = config self.layer = nn.ModuleList() self.gradient_checkpointing = False # segmentation architectures like DeepLab and PSPNet modify the strides # of the classification backbones dilate_layer_4 = dilate_layer_5 = False if config.output_stride == 8: dilate_layer_4 = True dilate_layer_5 = True elif config.output_stride == 16: dilate_layer_5 = True dilation = 1 layer_0_dim = make_divisible( clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16 ) layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16) layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8) layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8) layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8) layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8) layer_1 = MobileViTV2MobileNetLayer( config, in_channels=layer_0_dim, out_channels=layer_1_dim, stride=1, num_stages=1, ) self.layer.append(layer_1) layer_2 = MobileViTV2MobileNetLayer( config, in_channels=layer_1_dim, out_channels=layer_2_dim, stride=2, num_stages=2, ) self.layer.append(layer_2) layer_3 = MobileViTV2Layer( config, in_channels=layer_2_dim, out_channels=layer_3_dim, attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8), n_attn_blocks=config.n_attn_blocks[0], ) self.layer.append(layer_3) if dilate_layer_4: dilation *= 2 layer_4 = MobileViTV2Layer( config, in_channels=layer_3_dim, out_channels=layer_4_dim, attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8), n_attn_blocks=config.n_attn_blocks[1], dilation=dilation, ) self.layer.append(layer_4) if dilate_layer_5: dilation *= 2 layer_5 = MobileViTV2Layer( config, in_channels=layer_4_dim, out_channels=layer_5_dim, attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8), n_attn_blocks=config.n_attn_blocks[2], dilation=dilation, ) self.layer.append(layer_5) def forward( self, hidden_states: torch.Tensor, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutputWithNoAttention]: all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func( layer_module.__call__, 
hidden_states, ) else: hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTPreTrainedModel with MobileViT->MobileViTV2,mobilevit->mobilevitv2 class MobileViTV2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileViTV2Config base_model_prefix = "mobilevitv2" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) MOBILEVITV2_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileViTV2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILEVITV2_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileViTImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MobileViTV2 model outputting raw hidden-states without any specific head on top.", MOBILEVITV2_START_DOCSTRING, ) class MobileViTV2Model(MobileViTV2PreTrainedModel): def __init__(self, config: MobileViTV2Config, expand_output: bool = True): super().__init__(config) self.config = config self.expand_output = expand_output layer_0_dim = make_divisible( clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16 ) self.conv_stem = MobileViTV2ConvLayer( config, in_channels=config.num_channels, out_channels=layer_0_dim, kernel_size=3, stride=2, use_normalization=True, use_activation=True, ) self.encoder = MobileViTV2Encoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer_index, heads in heads_to_prune.items(): mobilevitv2_layer = self.encoder.layer[layer_index] if isinstance(mobilevitv2_layer, MobileViTV2Layer): for transformer_layer in mobilevitv2_layer.transformer.layer: transformer_layer.attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.conv_stem(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.expand_output: last_hidden_state = encoder_outputs[0] # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels) pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False) else: last_hidden_state = encoder_outputs[0] pooled_output = None if not return_dict: output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,) return output + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( """ MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, MOBILEVITV2_START_DOCSTRING, ) class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel): def __init__(self, config: MobileViTV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevitv2 = MobileViTV2Model(config) out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension # Classifier head self.classifier = ( nn.Linear(in_features=out_channels, out_features=config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTASPPPooling with MobileViT->MobileViTV2 class MobileViTV2ASPPPooling(nn.Module): def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None: super().__init__() self.global_pool = nn.AdaptiveAvgPool2d(output_size=1) self.conv_1x1 = MobileViTV2ConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", ) def forward(self, features: torch.Tensor) -> torch.Tensor: spatial_size = features.shape[-2:] features = self.global_pool(features) features = self.conv_1x1(features) features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False) return features class MobileViTV2ASPP(nn.Module): """ ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587 """ def __init__(self, config: MobileViTV2Config) -> None: super().__init__() encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension in_channels = encoder_out_channels out_channels = config.aspp_out_channels if len(config.atrous_rates) != 3: raise ValueError("Expected 3 values for atrous_rates") self.convs = nn.ModuleList() in_projection = MobileViTV2ConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation="relu", ) self.convs.append(in_projection) self.convs.extend( [ MobileViTV2ConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation="relu", ) for rate in config.atrous_rates ] ) pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels) self.convs.append(pool_layer) self.project = MobileViTV2ConvLayer( config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu" ) self.dropout = nn.Dropout(p=config.aspp_dropout_prob) def forward(self, features: torch.Tensor) -> torch.Tensor: pyramid = [] for conv in self.convs: pyramid.append(conv(features)) pyramid = torch.cat(pyramid, dim=1) pooled_features = self.project(pyramid) pooled_features = self.dropout(pooled_features) 
return pooled_features # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTDeepLabV3 with MobileViT->MobileViTV2 class MobileViTV2DeepLabV3(nn.Module): """ DeepLabv3 architecture: https://arxiv.org/abs/1706.05587 """ def __init__(self, config: MobileViTV2Config) -> None: super().__init__() self.aspp = MobileViTV2ASPP(config) self.dropout = nn.Dropout2d(config.classifier_dropout_prob) self.classifier = MobileViTV2ConvLayer( config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: features = self.aspp(hidden_states[-1]) features = self.dropout(features) features = self.classifier(features) return features @add_start_docstrings( """ MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC. """, MOBILEVITV2_START_DOCSTRING, ) class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel): def __init__(self, config: MobileViTV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilevitv2 = MobileViTV2Model(config, expand_output=False) self.segmentation_head = MobileViTV2DeepLabV3(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> import requests >>> import torch >>> from PIL import Image >>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256") >>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256") >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevitv2( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states) loss = None if labels is not None: if self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") else: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
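# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal image
# classification example mirroring the docstring samples above. It is kept as an
# uncalled helper so importing this module is unchanged; the checkpoint name comes
# from _IMAGE_CLASS_CHECKPOINT and the image URL is the COCO sample already used
# in the segmentation docstring.
def _illustrative_image_classification_example():
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    image_processor = AutoImageProcessor.from_pretrained(_IMAGE_CLASS_CHECKPOINT)
    model = MobileViTV2ForImageClassification.from_pretrained(_IMAGE_CLASS_CHECKPOINT)

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (batch_size, num_labels)

    predicted_class_idx = logits.argmax(-1).item()
    return model.config.id2label[predicted_class_idx]
# (end of illustrative sketch)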
transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py/0
{ "file_path": "transformers/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py", "repo_id": "transformers", "token_count": 17368 }
341
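The semantic-segmentation head in the `modeling_mobilevitv2.py` entry above upsamples its logits to the label resolution before scoring them with a cross-entropy loss that skips an ignore index. The sketch below reproduces just that step with plain PyTorch; the tensor shapes and the ignore index of 255 are illustrative assumptions, not values taken from the file.

```python
# Minimal sketch of the loss computation used by MobileViTV2ForSemanticSegmentation above:
# segmentation logits come out at a reduced spatial resolution, are upsampled to the label
# map size with bilinear interpolation, and scored with a CrossEntropyLoss that skips an
# ignore index. Shapes and the ignore index (255) are illustrative assumptions.
import torch
from torch import nn

batch_size, num_labels = 2, 21
logits = torch.randn(batch_size, num_labels, 32, 32)            # (batch, num_labels, h/8, w/8)
labels = torch.randint(0, num_labels, (batch_size, 256, 256))   # ground-truth class per pixel

upsampled_logits = nn.functional.interpolate(
    logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
)  # -> (batch, num_labels, 256, 256)

loss_fct = nn.CrossEntropyLoss(ignore_index=255)  # pixels labelled 255 never contribute
loss = loss_fct(upsampled_logits, labels)
print(upsampled_logits.shape, loss.item())
```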
# coding=utf-8 # Copyright 2021 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax mT5 model.""" import jax.numpy as jnp from ...utils import logging from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model from .configuration_mt5 import MT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxMT5Model(FlaxT5Model): r""" This class overrides [`FlaxT5Model`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import FlaxMT5Model, AutoTokenizer >>> model = FlaxMT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="np") >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config class FlaxMT5EncoderModel(FlaxT5EncoderModel): r""" This class overrides [`FlaxT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import FlaxT5EncoderModel, AutoTokenizer >>> model = FlaxT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="np") >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids >>> outputs = model(input_ids=inputs["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration): r""" This class overrides [`FlaxT5ForConditionalGeneration`]. Please check the superclass for the appropriate documentation alongside usage examples. 
Examples: ```python >>> from transformers import FlaxMT5ForConditionalGeneration, AutoTokenizer >>> model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="np") >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids) >>> logits = outputs.logits ```""" model_type = "mt5" config_class = MT5Config
transformers/src/transformers/models/mt5/modeling_flax_mt5.py/0
{ "file_path": "transformers/src/transformers/models/mt5/modeling_flax_mt5.py", "repo_id": "transformers", "token_count": 1465 }
342
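The `shift_tokens_right` helper in the Flax mT5 file above builds decoder inputs from labels. Below is a small numeric check of the same logic; it assumes `jax` is installed, and the pad / decoder-start ids are made-up example values rather than ones read from a real mT5 config.

```python
# Numeric check of the `shift_tokens_right` logic defined in the file above: tokens move one
# position to the right, the decoder-start id fills slot 0, and -100 label padding becomes pad.
import jax.numpy as jnp


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = jnp.zeros_like(input_ids)
    shifted = shifted.at[:, 1:].set(input_ids[:, :-1])
    shifted = shifted.at[:, 0].set(decoder_start_token_id)
    return jnp.where(shifted == -100, pad_token_id, shifted)


labels = jnp.array([[17, 42, -100, -100]])  # -100 marks padding positions in the labels
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
print(decoder_input_ids)  # [[ 0 17 42  0]]
```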
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_nezha"] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/nezha/__init__.py/0
{ "file_path": "transformers/src/transformers/models/nezha/__init__.py", "repo_id": "transformers", "token_count": 899 }
343
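The Nezha `__init__.py` above relies on `_LazyModule` so that importing the package only records names and defers importing `modeling_nezha` (and therefore torch) until something is actually accessed. The snippet below is a simplified stand-in for that idea using PEP 562 module `__getattr__`; it is meant to live in a package's `__init__.py` and is not the actual transformers implementation.

```python
# Simplified sketch of the lazy-import pattern: record name -> submodule at import time,
# resolve the real submodule only on first attribute access.
import importlib

_import_structure = {
    "configuration_nezha": ["NezhaConfig"],
    "modeling_nezha": ["NezhaModel", "NezhaForMaskedLM"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):
    # Called only when `name` is not already defined in this module (PEP 562).
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```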
# coding=utf-8 # Copyright 2022 UW-Madison and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Nystromformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { "uw-madison/nystromformer-512": "https://huggingface.co/uw-madison/nystromformer-512/resolve/main/config.json", # See all Nystromformer models at https://huggingface.co/models?filter=nystromformer } class NystromformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate an Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nystromformer [uw-madison/nystromformer-512](https://huggingface.co/uw-madison/nystromformer-512) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30000): Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`NystromformerModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`NystromformerModel`]. segment_means_seq_len (`int`, *optional*, defaults to 64): Sequence length used in segment-means. num_landmarks (`int`, *optional*, defaults to 64): The number of landmark (or Nystrom) points to use in Nystrom approximation of the softmax self-attention matrix. 
        conv_kernel_size (`int`, *optional*, defaults to 65):
            The kernel size of depthwise convolution used in Nystrom approximation.
        inv_coeff_init_option (`bool`, *optional*, defaults to `False`):
            Whether or not to use exact coefficient computation for the initial values for the iterative method of
            calculating the Moore-Penrose inverse of a matrix.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import NystromformerModel, NystromformerConfig

    >>> # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration
    >>> configuration = NystromformerConfig()

    >>> # Initializing a model from the uw-madison/nystromformer-512 style configuration
    >>> model = NystromformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "nystromformer"

    def __init__(
        self,
        vocab_size=30000,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=510,
        type_vocab_size=2,
        segment_means_seq_len=64,
        num_landmarks=64,
        conv_kernel_size=65,
        inv_coeff_init_option=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.segment_means_seq_len = segment_means_seq_len
        self.num_landmarks = num_landmarks
        self.conv_kernel_size = conv_kernel_size
        self.inv_coeff_init_option = inv_coeff_init_option
        self.layer_norm_eps = layer_norm_eps
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

transformers/src/transformers/models/nystromformer/configuration_nystromformer.py/0
{ "file_path": "transformers/src/transformers/models/nystromformer/configuration_nystromformer.py", "repo_id": "transformers", "token_count": 2444 }
344
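A short usage sketch for the configuration class above: instantiate it with a few overrides and round-trip it through JSON like any `PretrainedConfig`. It assumes the `transformers` package is installed; the overridden values are arbitrary.

```python
# Instantiate the Nystromformer configuration with custom hyperparameters and serialize it.
from transformers import NystromformerConfig

config = NystromformerConfig(num_landmarks=32, conv_kernel_size=33, num_hidden_layers=6)
print(config.num_landmarks, config.hidden_act)  # 32 gelu_new
print(config.to_json_string()[:120])            # JSON round-trip works like any PretrainedConfig
```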
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_opt"] = [ "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", "OPTForQuestionAnswering", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_opt"] = [ "FlaxOPTForCausalLM", "FlaxOPTModel", "FlaxOPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/opt/__init__.py/0
{ "file_path": "transformers/src/transformers/models/opt/__init__.py", "repo_id": "transformers", "token_count": 1188 }
345
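The OPT `__init__.py` above only wires in `OPTForCausalLM` when the PyTorch backend is available; once it is, the exported classes can be used end to end. The sketch below assumes a working network connection and uses `facebook/opt-125m` purely as a small example checkpoint.

```python
# Minimal causal-LM usage of the PyTorch OPT classes exported above.
from transformers import AutoTokenizer, OPTForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
model = OPTForCausalLM.from_pretrained("facebook/opt-125m")

inputs = tokenizer("A lazy module only imports", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```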
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for OwlViT""" import warnings from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, center_to_corners_format, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) def _upcast(t): # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() def box_area(boxes): """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union class OwlViTImageProcessor(BaseImageProcessor): r""" Constructs an OWL-ViT image processor. This image processor inherits from [`ImageProcessingMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the shorter edge of the input to a certain `size`. size (`Dict[str, int]`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized to (size, size). resample (`int`, *optional*, defaults to `Resampling.BICUBIC`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. 
Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. crop_size (`int`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for center cropping the image. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input by a certain factor. rescale_factor (`float`, *optional*, defaults to `1/255`): The factor to use for rescaling the image. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with `image_mean` and `image_std`. Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`. image_mean (`List[int]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): The sequence of means for each channel, to be used when normalizing images. image_std (`List[int]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): The sequence of standard deviations for each channel, to be used when normalizing images. """ model_input_names = ["pixel_values"] def __init__( self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=False, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ): size = size if size is not None else {"height": 768, "width": 768} size = get_size_dict(size, default_to_square=True) crop_size = crop_size if crop_size is not None else {"height": 768, "width": 768} crop_size = get_size_dict(crop_size, default_to_square=True) # Early versions of the OWL-ViT config on the hub had "rescale" as a flag. This clashes with the # vision image processor method `rescale` as it would be set as an attribute during the super().__init__ # call. This is for backwards compatibility. if "rescale" in kwargs: rescale_val = kwargs.pop("rescale") kwargs["do_rescale"] = rescale_val super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to a certain size. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): The size to resize the image to. Must contain height and width keys. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use when resizing the input. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size = get_size_dict(size, default_to_square=True) if "height" not in size or "width" not in size: raise ValueError("size dictionary must contain height and width keys") return resize( image, (size["height"], size["width"]), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def center_crop( self, image: np.ndarray, crop_size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image to a certain size. Args: image (`np.ndarray`): Image to center crop. crop_size (`Dict[str, int]`): The size to center crop the image to. Must contain height and width keys. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ crop_size = get_size_dict(crop_size, default_to_square=True) if "height" not in crop_size or "width" not in crop_size: raise ValueError("crop_size dictionary must contain height and width keys") return center_crop( image, (crop_size["height"], crop_size["width"]), data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Prepares an image or batch of images for the model. Args: images (`ImageInput`): The image or batch of images to be prepared. 
Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`. size (`Dict[str, int]`, *optional*, defaults to `self.size`): The size to resize the input to. Only has an effect if `do_resize` is set to `True`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether or not to center crop the input. If `True`, will center crop the input to the size specified by `crop_size`. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether or not to rescale the input. If `True`, will rescale the input by dividing it by `rescale_factor`. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean` and dividing by `image_std`. image_mean (`Union[float, List[float]]`, *optional*, defaults to `self.image_mean`): The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to `True`. image_std (`Union[float, List[float]]`, *optional*, defaults to `self.image_std`): The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_center_crop is not None and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_center_crop: images = [ self.center_crop(image, crop_size=crop_size, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" # TODO: (amy) add support for other frameworks warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.", FutureWarning, ) logits, boxes = outputs.logits, outputs.pred_boxes if len(logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) labels = probs.indices # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(boxes) # Convert from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection( self, outputs, threshold: float = 0.1, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ # TODO: (amy) add support for other frameworks logits, boxes = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) labels = probs.indices # Convert to [x0, y0, x1, y1] format boxes = center_to_corners_format(boxes) # Convert from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # TODO: (Amy) Make compatible with other frameworks def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. 
nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection. """ logits, target_boxes = outputs.logits, outputs.target_pred_boxes if len(logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) # Convert to [x0, y0, x1, y1] format target_boxes = center_to_corners_format(target_boxes) # Apply non-maximum suppression (NMS) if nms_threshold < 1.0: for idx in range(target_boxes.shape[0]): for i in torch.argsort(-scores[idx]): if not scores[idx][i]: continue ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0] ious[i] = -1.0 # Mask self-IoU. scores[idx][ious > nms_threshold] = 0.0 # Convert from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(target_boxes.device) target_boxes = target_boxes * scale_fct[:, None, :] # Compute box display alphas based on prediction scores results = [] alphas = torch.zeros_like(scores) for idx in range(target_boxes.shape[0]): # Select scores for boxes matching the current query: query_scores = scores[idx] if not query_scores.nonzero().numel(): continue # Apply threshold on scores before scaling query_scores[query_scores < threshold] = 0.0 # Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1. # All other boxes will either belong to a different query, or will not be shown. max_score = torch.max(query_scores) + 1e-6 query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9) query_alphas = torch.clip(query_alphas, 0.0, 1.0) alphas[idx] = query_alphas mask = alphas[idx] > 0 box_scores = alphas[idx][mask] boxes = target_boxes[idx][mask] results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results
transformers/src/transformers/models/owlvit/image_processing_owlvit.py/0
{ "file_path": "transformers/src/transformers/models/owlvit/image_processing_owlvit.py", "repo_id": "transformers", "token_count": 11997 }
346
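A numeric sanity check for the `box_area` / `box_iou` helpers defined at the top of the OWL-ViT image processor above, restated here so the snippet runs on its own: two 2x2 boxes offset by one unit overlap in a 1x1 square, so the expected IoU is 1 / (4 + 4 - 1) = 1/7.

```python
# Re-statement of the box_area / box_iou logic from the file above, with a worked example.
import torch


def box_area(boxes):
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(boxes1, boxes2):
    area1, area2 = box_area(boxes1), box_area(boxes2)
    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])        # [N, M, 2]
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])    # [N, M, 2]
    width_height = (right_bottom - left_top).clamp(min=0)           # [N, M, 2]
    inter = width_height[..., 0] * width_height[..., 1]             # [N, M]
    union = area1[:, None] + area2 - inter
    return inter / union, union


boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
iou, union = box_iou(boxes1, boxes2)
print(iou)  # tensor([[0.1429]]) == 1/7
```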
# coding=utf-8 # Copyright 2020 Google and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model PEGASUS.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: PegasusTokenizer = None logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/pegasus-xsum": 512, } class PegasusTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. mask_token (`str`, *optional*, defaults to `"<mask_2>"`): The token used for masking single token values. This is the token used when training this model with masked language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining. It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf). mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`): The token used for masking whole target sentences. This is the token used when training this model with gap sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf). 
additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and <unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66) that uses the tokens 2 - 104 only for pretraining """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = PegasusTokenizer model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, # entries 2 - 104 are only used for pretraining **kwargs, ): self.offset = offset if additional_special_tokens is not None: if not isinstance(additional_special_tokens, list): raise TypeError( f"additional_special_tokens should be of type {type(list)}, but is" f" {type(additional_special_tokens)}" ) additional_special_tokens_extended = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1) ] if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." ) additional_special_tokens = additional_special_tokens_extended else: additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)] # pegasus was design to support changing the index of the first tokens. 
If one of the padding/eos/unk/mask token # is different from default, we must rebuild the vocab from_slow = kwargs.pop("from_slow", None) from_slow = from_slow or str(pad_token) != "<pad>" or str(eos_token) != "</s>" or str(unk_token) != "<unk>" kwargs.pop("added_tokens_decoder", {}) super().__init__( vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, from_slow=from_slow, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def _special_token_mask(self, seq): all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}" ) return [1 if x in all_special_ids else 0 for x in seq] def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """Get list where entries are [1] if a token is [eos] or [pad] else 0.""" if already_has_special_tokens: return self._special_token_mask(token_ids_0) elif token_ids_1 is None: return self._special_token_mask(token_ids_0) + [1] else: return self._special_token_mask(token_ids_0 + token_ids_1) + [1] def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """ Build model inputs from a sequence by adding eos to the end. no bos token is added to the front. - single sequence: `X </s>` - pair of sequences: `A B </s>` (not intended use) Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py/0
{ "file_path": "transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py", "repo_id": "transformers", "token_count": 4338 }
347
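A small usage sketch for the fast PEGASUS tokenizer above, showing the eos-only special-token scheme and the reserved pretraining slots. It assumes a network connection plus the `tokenizers` backend; `google/pegasus-xsum` is the checkpoint already referenced in the file.

```python
# PEGASUS appends </s> and never prepends a bos token; ids 2-104 are reserved for pretraining.
from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")

ids = tokenizer("PEGASUS is pre-trained with gap sentences.").input_ids
print(ids[-1] == tokenizer.eos_token_id)          # True: build_inputs_with_special_tokens adds </s> only
print(tokenizer.convert_ids_to_tokens(ids[-1:]))  # ['</s>']
print(len(tokenizer.additional_special_tokens))   # 102 with the default offset of 103: <mask_1>, <unk_2>..<unk_102>
```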
# coding=utf-8 # Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Phi model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) PHI_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/phi-1": "https://huggingface.co/microsoft/phi-1/resolve/main/config.json", "microsoft/phi-1_5": "https://huggingface.co/microsoft/phi-1_5/resolve/main/config.json", "microsoft/phi-2": "https://huggingface.co/microsoft/phi-2/resolve/main/config.json", } class PhiConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate an Phi model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Phi [microsoft/phi-1](https://huggingface.co/microsoft/phi-1). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 51200): Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`PhiModel`]. hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 8192): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. resid_pdrop (`float`, *optional*, defaults to 0.0): Dropout probability for mlp outputs. embd_pdrop (`int`, *optional*, defaults to 0.0): The dropout ratio for the embeddings. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio after computing the attention scores. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 supports up to 2048 tokens. 
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
            is an experimental feature, subject to breaking API changes in future versions.
        partial_rotary_factor (`float`, *optional*, defaults to 0.5):
            Percentage of the query and key dimensions that rotary embedding is applied to.
        qk_layernorm (`bool`, *optional*, defaults to `False`):
            Whether or not to normalize the queries and keys after projecting the hidden states.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end-of-sequence token.
Example: ```python >>> from transformers import PhiModel, PhiConfig >>> # Initializing a Phi-1 style configuration >>> configuration = PhiConfig.from_pretrained("microsoft/phi-1") >>> # Initializing a model from the configuration >>> model = PhiModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "phi" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=51200, hidden_size=2048, intermediate_size=8192, num_hidden_layers=24, num_attention_heads=32, num_key_value_heads=None, resid_pdrop=0.0, embd_pdrop=0.0, attention_dropout=0.0, hidden_act="gelu_new", max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=0.5, qk_layernorm=False, bos_token_id=1, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attention_dropout = attention_dropout self.hidden_act = hidden_act self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.partial_rotary_factor = partial_rotary_factor self.qk_layernorm = qk_layernorm self._rope_scaling_validation() super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. """ if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, " f"got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_factor = self.rope_scaling.get("factor", None) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
transformers/src/transformers/models/phi/configuration_phi.py/0
{ "file_path": "transformers/src/transformers/models/phi/configuration_phi.py", "repo_id": "transformers", "token_count": 3668 }
348
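The `_rope_scaling_validation` method above rejects malformed `rope_scaling` dictionaries at configuration time. A quick sketch of both outcomes, assuming a `transformers` version recent enough to export `PhiConfig`; the factor values are arbitrary.

```python
# rope_scaling is validated as soon as the config is constructed, not at model time.
from transformers import PhiConfig

config = PhiConfig(rope_scaling={"type": "dynamic", "factor": 2.0})  # well-formed: accepted
print(config.partial_rotary_factor)  # 0.5 by default

try:
    PhiConfig(rope_scaling={"type": "dynamic", "factor": 0.5})  # factor must be a float > 1
except ValueError as err:
    print(type(err).__name__, ":", err)
```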
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RAG model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings RAG_CONFIG_DOC = r""" [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. 
exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. """ @add_start_docstrings(RAG_CONFIG_DOC) class RagConfig(PretrainedConfig): model_type = "rag" is_composition = True def __init__( self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ): super().__init__( bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" question_encoder_config = kwargs.pop("question_encoder") question_encoder_model_type = question_encoder_config.pop("model_type") decoder_config = kwargs.pop("generator") decoder_model_type = decoder_config.pop("model_type") from ..auto.configuration_auto import AutoConfig self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config) self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config) self.reduce_loss = reduce_loss self.label_smoothing = label_smoothing self.exclude_bos_score = exclude_bos_score self.do_marginalize = do_marginalize self.title_sep = title_sep self.doc_sep = doc_sep self.n_docs = n_docs self.max_combined_length = max_combined_length self.dataset = dataset self.dataset_split = dataset_split self.index_name = index_name self.retrieval_vector_size = retrieval_vector_size self.retrieval_batch_size = retrieval_batch_size self.passages_path = passages_path self.index_path = index_path self.use_dummy_dataset = use_dummy_dataset self.output_retrieved = output_retrieved self.do_deduplication = do_deduplication self.use_cache = use_cache if self.forced_eos_token_id is None: self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None) @classmethod def from_question_encoder_generator_configs( cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs ) -> PretrainedConfig: r""" Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and decoder model configuration. Returns: [`EncoderDecoderConfig`]: An instance of a configuration object """ return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
transformers/src/transformers/models/rag/configuration_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/configuration_rag.py", "repo_id": "transformers", "token_count": 3331 }
349
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model Reformer.""" import os from shutil import copyfile from typing import Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_reformer import ReformerTokenizer else: ReformerTokenizer = None logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) }, "tokenizer_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/tokenizer.json" ) }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/reformer-crime-and-punishment": 524288, } class ReformerTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" Reformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = ReformerTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
transformers/src/transformers/models/reformer/tokenization_reformer_fast.py/0
{ "file_path": "transformers/src/transformers/models/reformer/tokenization_reformer_fast.py", "repo_id": "transformers", "token_count": 1954 }
350
# coding=utf-8 # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ResNet model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class ResNetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ResNetModel`]. It is used to instantiate an ResNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ResNet [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. embedding_size (`int`, *optional*, defaults to 64): Dimensionality (hidden size) for the embedding layer. hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`): Dimensionality (hidden size) at each stage. depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`): Depth (number of layers) for each stage. layer_type (`str`, *optional*, defaults to `"bottleneck"`): The layer to use, it can be either `"basic"` (used for smaller models, like resnet-18 or resnet-34) or `"bottleneck"` (used for larger models like resnet-50 and above). hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. downsample_in_first_stage (`bool`, *optional*, defaults to `False`): If `True`, the first stage will downsample the inputs using a `stride` of 2. downsample_in_bottleneck (`bool`, *optional*, defaults to `False`): If `True`, the first conv 1x1 in ResNetBottleNeckLayer will downsample the inputs using a `stride` of 2. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). 
If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import ResNetConfig, ResNetModel >>> # Initializing a ResNet resnet-50 style configuration >>> configuration = ResNetConfig() >>> # Initializing a model (with random weights) from the resnet-50 style configuration >>> model = ResNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "resnet" layer_types = ["basic", "bottleneck"] def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, downsample_in_bottleneck=False, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}") self.num_channels = num_channels self.embedding_size = embedding_size self.hidden_sizes = hidden_sizes self.depths = depths self.layer_type = layer_type self.hidden_act = hidden_act self.downsample_in_first_stage = downsample_in_first_stage self.downsample_in_bottleneck = downsample_in_bottleneck self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) class ResNetOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-3
transformers/src/transformers/models/resnet/configuration_resnet.py/0
{ "file_path": "transformers/src/transformers/models/resnet/configuration_resnet.py", "repo_id": "transformers", "token_count": 2286 }
351
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audio/Text processor class for SeamlessM4T """ from ...processing_utils import ProcessorMixin class SeamlessM4TProcessor(ProcessorMixin): r""" Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a single processor. [`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for more information. Args: feature_extractor ([`SeamlessM4TFeatureExtractor`]): The audio processor is a required input. tokenizer ([`SeamlessM4TTokenizerFast`]): The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = ("SeamlessM4TTokenizer", "SeamlessM4TTokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, src_lang=None, tgt_lang=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text` and `kwargs` arguments to SeamlessM4TTokenizerFast's [`~SeamlessM4TTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwrags` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. src_lang (`str`, *optional*): The language code of the input texts/audios. If not specified, the last `src_lang` specified will be used. tgt_lang (`str`, *optional*): The code of the target language. If not specified, the last `tgt_lang` specified will be used. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the tokenizer. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). 
            - **input_features** -- Audio input features to be fed to a model. Returned when `audios` is not
              `None`.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        elif text is not None and audios is not None:
            raise ValueError(
                "Text and audios are mutually exclusive when passed to `SeamlessM4T`. Specify one or the other."
            )
        elif text is not None:
            if tgt_lang is not None:
                self.tokenizer.tgt_lang = tgt_lang
            if src_lang is not None:
                self.tokenizer.src_lang = src_lang
            encoding = self.tokenizer(text, **kwargs)

            return encoding
        else:
            encoding = self.feature_extractor(audios, sampling_rate=sampling_rate, **kwargs)
            return encoding

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to SeamlessM4TTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
        Please refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to SeamlessM4TTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
transformers/src/transformers/models/seamless_m4t/processing_seamless_m4t.py/0
{ "file_path": "transformers/src/transformers/models/seamless_m4t/processing_seamless_m4t.py", "repo_id": "transformers", "token_count": 2298 }
352
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SEW checkpoint.""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model, is_finetuned): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): mapped_key = "sew." 
+ mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "weight" in name: weight_type = "weight" elif "bias" in name: weight_type = "bias" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def convert_config(model, is_finetuned): config = SEWConfig() if is_finetuned: fs_config = model.w2v_encoder.w2v_model.cfg else: fs_config = model.cfg config.conv_bias = fs_config.conv_bias conv_layers = eval(fs_config.conv_feature_layers) config.conv_dim = [x[0] for x in conv_layers] config.conv_kernel = [x[1] for x in conv_layers] config.conv_stride = [x[2] for x in conv_layers] config.feat_extract_activation = "gelu" config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group" config.final_dropout = 0.0 config.hidden_act = fs_config.activation_fn.name config.hidden_size = fs_config.encoder_embed_dim config.initializer_range = 0.02 config.intermediate_size = fs_config.encoder_ffn_embed_dim config.layer_norm_eps = 1e-5 config.layerdrop = fs_config.encoder_layerdrop config.num_attention_heads = fs_config.encoder_attention_heads config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups config.num_conv_pos_embeddings = fs_config.conv_pos config.num_feat_extract_layers = len(conv_layers) config.num_hidden_layers = fs_config.encoder_layers config.squeeze_factor = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: fs_config = model.cfg config.final_dropout = fs_config.final_dropout config.layerdrop = fs_config.layerdrop config.activation_dropout = fs_config.activation_dropout config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 config.attention_dropout = fs_config.attention_dropout config.feat_proj_dropout = fs_config.dropout_input config.hidden_dropout = fs_config.dropout config.mask_feature_length = fs_config.mask_channel_length config.mask_feature_prob = fs_config.mask_channel_prob config.mask_time_length = fs_config.mask_length config.mask_time_prob = fs_config.mask_prob config.feature_extractor_type = "Wav2Vec2FeatureExtractor" config.tokenizer_class = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def convert_sew_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ): """ Copy/paste/tweak model's weights to transformers design. 
""" if is_finetuned: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} ) else: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) if config_path is not None: config = SEWConfig.from_pretrained(config_path) else: config = convert_config(model[0], is_finetuned) model = model[0].eval() return_attention_mask = True if config.feat_extract_norm == "layer" else False feature_extractor = Wav2Vec2FeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, ) if is_finetuned: if dict_path: target_dict = Dictionary.load(dict_path) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq target_dict.indices[target_dict.bos_word] = target_dict.pad_index target_dict.indices[target_dict.pad_word] = target_dict.bos_index config.bos_token_id = target_dict.pad_index config.pad_token_id = target_dict.bos_index config.eos_token_id = target_dict.eos_index config.vocab_size = len(target_dict.symbols) vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json") if not os.path.isdir(pytorch_dump_folder_path): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path)) return os.makedirs(pytorch_dump_folder_path, exist_ok=True) with open(vocab_path, "w", encoding="utf-8") as vocab_handle: json.dump(target_dict.indices, vocab_handle) tokenizer = Wav2Vec2CTCTokenizer( vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, ) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) processor.save_pretrained(pytorch_dump_folder_path) hf_model = SEWForCTC(config) else: hf_model = SEWModel(config) feature_extractor.save_pretrained(pytorch_dump_folder_path) recursively_load_weights(model, hf_model, is_finetuned) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) args = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
transformers/src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 5650 }
353
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) _import_structure = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_speecht5"] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speecht5 import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechT5Config, SpeechT5HifiGanConfig, ) from .feature_extraction_speecht5 import SpeechT5FeatureExtractor from .processing_speecht5 import SpeechT5Processor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speecht5 import SpeechT5Tokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speecht5 import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Model, SpeechT5PreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/speecht5/__init__.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/__init__.py", "repo_id": "transformers", "token_count": 1227 }
354
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch SqueezeBert model.""" import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_squeezebert import SqueezeBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "squeezebert/squeezebert-uncased" _CONFIG_FOR_DOC = "SqueezeBertConfig" SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "squeezebert/squeezebert-uncased", "squeezebert/squeezebert-mnli", "squeezebert/squeezebert-mnli-headless", ] class SqueezeBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MatMulWrapper(nn.Module): """ Wrapper for torch.matmul(). This makes flop-counting easier to implement. Note that if you directly call torch.matmul() in your code, the flop counter will typically ignore the flops of the matmul. 
""" def __init__(self): super().__init__() def forward(self, mat1, mat2): """ :param inputs: two torch tensors :return: matmul of these tensors Here are the typical dimensions found in BERT (the B is optional) mat1.shape: [B, <optional extra dims>, M, K] mat2.shape: [B, <optional extra dims>, K, N] output shape: [B, <optional extra dims>, M, N] """ return torch.matmul(mat1, mat2) class SqueezeBertLayerNorm(nn.LayerNorm): """ This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension. N = batch C = channels W = sequence length """ def __init__(self, hidden_size, eps=1e-12): nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps) # instantiates self.{weight, bias, eps} def forward(self, x): x = x.permute(0, 2, 1) x = nn.LayerNorm.forward(self, x) return x.permute(0, 2, 1) class ConvDropoutLayerNorm(nn.Module): """ ConvDropoutLayerNorm: Conv, Dropout, LayerNorm """ def __init__(self, cin, cout, groups, dropout_prob): super().__init__() self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups) self.layernorm = SqueezeBertLayerNorm(cout) self.dropout = nn.Dropout(dropout_prob) def forward(self, hidden_states, input_tensor): x = self.conv1d(hidden_states) x = self.dropout(x) x = x + input_tensor x = self.layernorm(x) return x class ConvActivation(nn.Module): """ ConvActivation: Conv, Activation """ def __init__(self, cin, cout, groups, act): super().__init__() self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups) self.act = ACT2FN[act] def forward(self, x): output = self.conv1d(x) return self.act(output) class SqueezeBertSelfAttention(nn.Module): def __init__(self, config, cin, q_groups=1, k_groups=1, v_groups=1): """ config = used for some things; ignored for others (work in progress...) 
cin = input channels = output channels groups = number of groups to use in conv1d layers """ super().__init__() if cin % config.num_attention_heads != 0: raise ValueError( f"cin ({cin}) is not a multiple of the number of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(cin / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=q_groups) self.key = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=k_groups) self.value = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=v_groups) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.softmax = nn.Softmax(dim=-1) self.matmul_qk = MatMulWrapper() self.matmul_qkv = MatMulWrapper() def transpose_for_scores(self, x): """ - input: [N, C, W] - output: [N, C1, W, C2] where C1 is the head index, and C2 is one head's contents """ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W] x = x.view(*new_x_shape) return x.permute(0, 1, 3, 2) # [N, C1, C2, W] --> [N, C1, W, C2] def transpose_key_for_scores(self, x): """ - input: [N, C, W] - output: [N, C1, C2, W] where C1 is the head index, and C2 is one head's contents """ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W] x = x.view(*new_x_shape) # no `permute` needed return x def transpose_output(self, x): """ - input: [N, C1, W, C2] - output: [N, C, W] """ x = x.permute(0, 1, 3, 2).contiguous() # [N, C1, C2, W] new_x_shape = (x.size()[0], self.all_head_size, x.size()[3]) # [N, C, W] x = x.view(*new_x_shape) return x def forward(self, hidden_states, attention_mask, output_attentions): """ expects hidden_states in [N, C, W] data layout. The attention_mask data layout is [N, W], and it does not need to be transposed. """ mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_score = self.matmul_qk(query_layer, key_layer) attention_score = attention_score / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_score = attention_score + attention_mask # Normalize the attention scores to probabilities. attention_probs = self.softmax(attention_score) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = self.matmul_qkv(attention_probs, value_layer) context_layer = self.transpose_output(context_layer) result = {"context_layer": context_layer} if output_attentions: result["attention_score"] = attention_score return result class SqueezeBertModule(nn.Module): def __init__(self, config): """ - hidden_size = input chans = output chans for Q, K, V (they are all the same ... for now) = output chans for the module - intermediate_size = output chans for intermediate layer - groups = number of groups for all layers in the BertModule. 
(eventually we could change the interface to allow different groups for different layers) """ super().__init__() c0 = config.hidden_size c1 = config.hidden_size c2 = config.intermediate_size c3 = config.hidden_size self.attention = SqueezeBertSelfAttention( config=config, cin=c0, q_groups=config.q_groups, k_groups=config.k_groups, v_groups=config.v_groups ) self.post_attention = ConvDropoutLayerNorm( cin=c0, cout=c1, groups=config.post_attention_groups, dropout_prob=config.hidden_dropout_prob ) self.intermediate = ConvActivation(cin=c1, cout=c2, groups=config.intermediate_groups, act=config.hidden_act) self.output = ConvDropoutLayerNorm( cin=c2, cout=c3, groups=config.output_groups, dropout_prob=config.hidden_dropout_prob ) def forward(self, hidden_states, attention_mask, output_attentions): att = self.attention(hidden_states, attention_mask, output_attentions) attention_output = att["context_layer"] post_attention_output = self.post_attention(attention_output, hidden_states) intermediate_output = self.intermediate(post_attention_output) layer_output = self.output(intermediate_output, post_attention_output) output_dict = {"feature_map": layer_output} if output_attentions: output_dict["attention_score"] = att["attention_score"] return output_dict class SqueezeBertEncoder(nn.Module): def __init__(self, config): super().__init__() assert config.embedding_size == config.hidden_size, ( "If you want embedding_size != intermediate hidden_size, " "please insert a Conv1d layer to adjust the number of channels " "before the first SqueezeBertModule." ) self.layers = nn.ModuleList(SqueezeBertModule(config) for _ in range(config.num_hidden_layers)) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): if head_mask is None: head_mask_is_all_none = True elif head_mask.count(None) == len(head_mask): head_mask_is_all_none = True else: head_mask_is_all_none = False assert head_mask_is_all_none is True, "head_mask is not yet supported in the SqueezeBert implementation." # [batch_size, sequence_length, hidden_size] --> [batch_size, hidden_size, sequence_length] hidden_states = hidden_states.permute(0, 2, 1) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for layer in self.layers: if output_hidden_states: hidden_states = hidden_states.permute(0, 2, 1) all_hidden_states += (hidden_states,) hidden_states = hidden_states.permute(0, 2, 1) layer_output = layer.forward(hidden_states, attention_mask, output_attentions) hidden_states = layer_output["feature_map"] if output_attentions: all_attentions += (layer_output["attention_score"],) # [batch_size, hidden_size, sequence_length] --> [batch_size, sequence_length, hidden_size] hidden_states = hidden_states.permute(0, 2, 1) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class SqueezeBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class SqueezeBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class SqueezeBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = SqueezeBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class SqueezeBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = SqueezeBertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class SqueezeBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SqueezeBertConfig base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv1d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, SqueezeBertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) SQUEEZEBERT_START_DOCSTRING = r""" The SqueezeBERT model was proposed in [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the *squeezebert/squeezebert-mnli-headless* checkpoint as a starting point. 
Parameters: config ([`SqueezeBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Hierarchy: ``` Internal class hierarchy: SqueezeBertModel SqueezeBertEncoder SqueezeBertModule SqueezeBertSelfAttention ConvActivation ConvDropoutLayerNorm ``` Data layouts: ``` Input data is in [batch, sequence_length, hidden_size] format. Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if `output_hidden_states == True`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format. The final output of the encoder is in [batch, sequence_length, hidden_size] format. ``` """ SQUEEZEBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top.", SQUEEZEBERT_START_DOCSTRING, ) class SqueezeBertModel(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = SqueezeBertEmbeddings(config) self.encoder = SqueezeBertEncoder(config) self.pooler = SqueezeBertPooler(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if not 
return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings("""SqueezeBERT Model with a `language modeling` head on top.""", SQUEEZEBERT_START_DOCSTRING) class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.transformer = SqueezeBertModel(config) self.cls = SqueezeBertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", SQUEEZEBERT_START_DOCSTRING, ) class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = SqueezeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", SQUEEZEBERT_START_DOCSTRING, ) class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = SqueezeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", SQUEEZEBERT_START_DOCSTRING, ) class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = SqueezeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", SQUEEZEBERT_START_DOCSTRING, ) class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = SqueezeBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/squeezebert/modeling_squeezebert.py/0
{ "file_path": "transformers/src/transformers/models/squeezebert/modeling_squeezebert.py", "repo_id": "transformers", "token_count": 19019 }
355
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Swin2SR.""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class Swin2SRImageProcessor(BaseImageProcessor): r""" Constructs a Swin2SR image processor. Args: do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs, ) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size def pad( self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad an image to make the height and width divisible by `size`. Args: image (`np.ndarray`): Image to pad. size (`int`): The size to make the height and width divisible by. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The padded image. 
""" old_height, old_width = get_image_size(image, input_data_format) pad_height = (old_height // size + 1) * size - old_height pad_width = (old_width // size + 1) * size - old_width return pad( image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format, input_data_format=input_data_format, ) def preprocess( self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to make the height and width divisible by `window_size`. pad_size (`int`, *optional*, defaults to 32): The size of the sliding window for the local attention. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of typ, input_data_format=input_data_formate `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_pad = do_pad if do_pad is not None else self.do_pad pad_size = pad_size if pad_size is not None else self.pad_size images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. 
images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_pad: images = [self.pad(image, size=pad_size, input_data_format=input_data_format) for image in images] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
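

if __name__ == "__main__":
    # A minimal sketch of the preprocessing defined above, assuming it is run as a script.
    # With the defaults (do_rescale=True, rescale_factor=1/255, do_pad=True, pad_size=8), the
    # image is rescaled to [0, 1] and padded so height and width become multiples of 8. Note
    # that `pad` always adds (dim // size + 1) * size - dim pixels, so a side that is already
    # divisible by `pad_size` still receives one extra block of padding.
    processor = Swin2SRImageProcessor()
    image = np.random.randint(0, 256, (510, 512, 3), dtype=np.uint8)  # (height, width, channels)
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 512, 520): 510 -> 512, 512 -> 520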
transformers/src/transformers/models/swin2sr/image_processing_swin2sr.py/0
{ "file_path": "transformers/src/transformers/models/swin2sr/image_processing_swin2sr.py", "repo_id": "transformers", "token_count": 3791 }
356
#!/usr/bin/env bash
# Use this script as follows: ./download_from_gcp.sh /path/to/folder/to/store/downloads
folder_to_store_downloads=${1}

# Replace gcp_path by the T5 cloud bucket folder you want to download here.
# To download the official `t5-small` model of https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints:
gcp_path="gs://t5-data/pretrained_models/small"

# Number of files the checkpoint is split into
num_of_checks=16

# Create dir if it does not exist
mkdir -p ${folder_to_store_downloads}

# Copy all meta information files
gsutil cp "${gcp_path}/operative_config.gin" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/checkpoint" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/model.ckpt-1000000.index" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/model.ckpt-1000000.meta" ${folder_to_store_downloads}

# Copy all model weights
# single digit checkpoint shards (indices 0-9)
for ((i = 0 ; i < 10 && i < ${num_of_checks} ; i++)); do
  gsutil cp "${gcp_path}/model.ckpt-1000000.data-0000${i}-of-000${num_of_checks}" ${folder_to_store_downloads}
done

# double digit checkpoint shards (indices 10 and above)
for ((i = 10 ; i < ${num_of_checks} ; i++)); do
  gsutil cp "${gcp_path}/model.ckpt-1000000.data-000${i}-of-000${num_of_checks}" ${folder_to_store_downloads}
done

# Having run this script, you should create a suitable config.json, *e.g.* by
# looking at `https://huggingface.co/t5-small`.
# Then you can run `python convert_t5_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path "${folder_to_store_downloads}" --config_file "config.json" --pytorch_dump_path "/path/to/store/pytorch/weights"`
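
# A hedged sketch (Python, separate from this shell script) of the step after conversion,
# assuming the command in the comment above was run and that the conversion script saved the
# weights together with a config.json under /path/to/store/pytorch/weights:
#
#   from transformers import T5ForConditionalGeneration, T5Tokenizer
#
#   model = T5ForConditionalGeneration.from_pretrained("/path/to/store/pytorch/weights")
#   tokenizer = T5Tokenizer.from_pretrained("t5-small")  # the official vocab is shared across T5 sizes
#   input_ids = tokenizer("translate English to German: Hello world.", return_tensors="pt").input_ids
#   print(tokenizer.decode(model.generate(input_ids)[0], skip_special_tokens=True))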
transformers/src/transformers/models/t5/download_from_gcp.sh/0
{ "file_path": "transformers/src/transformers/models/t5/download_from_gcp.sh", "repo_id": "transformers", "token_count": 579 }
357
# coding=utf-8 # Copyright 2020 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for TAPAS model.""" import collections import datetime import enum import itertools import math import os import re import unicodedata from dataclasses import dataclass from typing import Callable, Dict, Generator, List, Optional, Text, Tuple, Union import numpy as np from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, VERY_LARGE_INTEGER, BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, ) from ...utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available, logging if is_pandas_available(): import pandas as pd logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { # large models "google/tapas-large-finetuned-sqa": ( "https://huggingface.co/google/tapas-large-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-large-finetuned-wtq": ( "https://huggingface.co/google/tapas-large-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-large-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-large-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-large-finetuned-tabfact": ( "https://huggingface.co/google/tapas-large-finetuned-tabfact/resolve/main/vocab.txt" ), # base models "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/vocab.txt" ), # medium models "google/tapas-medium-finetuned-sqa": ( "https://huggingface.co/google/tapas-medium-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-medium-finetuned-wtq": ( "https://huggingface.co/google/tapas-medium-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-medium-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-medium-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-medium-finetuned-tabfact": ( "https://huggingface.co/google/tapas-medium-finetuned-tabfact/resolve/main/vocab.txt" ), # small models "google/tapas-small-finetuned-sqa": ( "https://huggingface.co/google/tapas-small-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-small-finetuned-wtq": ( "https://huggingface.co/google/tapas-small-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-small-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-small-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-small-finetuned-tabfact": ( 
"https://huggingface.co/google/tapas-small-finetuned-tabfact/resolve/main/vocab.txt" ), # tiny models "google/tapas-tiny-finetuned-sqa": ( "https://huggingface.co/google/tapas-tiny-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-tiny-finetuned-wtq": ( "https://huggingface.co/google/tapas-tiny-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-tiny-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-tiny-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-tiny-finetuned-tabfact": ( "https://huggingface.co/google/tapas-tiny-finetuned-tabfact/resolve/main/vocab.txt" ), # mini models "google/tapas-mini-finetuned-sqa": ( "https://huggingface.co/google/tapas-mini-finetuned-sqa/resolve/main/vocab.txt" ), "google/tapas-mini-finetuned-wtq": ( "https://huggingface.co/google/tapas-mini-finetuned-wtq/resolve/main/vocab.txt" ), "google/tapas-mini-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-mini-finetuned-wikisql-supervised/resolve/main/vocab.txt" ), "google/tapas-mini-finetuned-tabfact": ( "https://huggingface.co/google/tapas-mini-finetuned-tabfact/resolve/main/vocab.txt" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {name: 512 for name in PRETRAINED_VOCAB_FILES_MAP.keys()} PRETRAINED_INIT_CONFIGURATION = {name: {"do_lower_case": True} for name in PRETRAINED_VOCAB_FILES_MAP.keys()} class TapasTruncationStrategy(ExplicitEnum): """ Possible values for the `truncation` argument in [`~TapasTokenizer.__call__`]. Useful for tab-completion in an IDE. """ DROP_ROWS_TO_FIT = "drop_rows_to_fit" DO_NOT_TRUNCATE = "do_not_truncate" TableValue = collections.namedtuple("TokenValue", ["token", "column_id", "row_id"]) @dataclass(frozen=True) class TokenCoordinates: column_index: int row_index: int token_index: int @dataclass class TokenizedTable: rows: List[List[List[Text]]] selected_tokens: List[TokenCoordinates] @dataclass(frozen=True) class SerializedExample: tokens: List[Text] column_ids: List[int] row_ids: List[int] segment_ids: List[int] def _is_inner_wordpiece(token: Text): return token.startswith("##") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. 
Accepts the following values: - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate row by row, removing rows from the table. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ class TapasTokenizer(PreTrainedTokenizer): r""" Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by TAPAS models. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. [`TapasTokenizer`] creates several token type ids to encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `segment_ids`, `column_ids`, `row_ids`, `prev_labels`, `column_ranks`, `inv_column_ranks` and `numeric_relations`: - segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and padding. - column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding. - row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens, special tokens and padding. Tokens of column headers are also 0. - prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in a conversational setup (such as SQA). - column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a column "number of movies" with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2 respectively. 0 for all question tokens, special tokens and padding. - inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if you have a column "number of movies" with values 87, 53 and 69, then the inverse column ranks of these tokens are 1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding. 
- numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all question tokens, special tokens and padding. [`TapasTokenizer`] runs end-to-end tokenization on a table and associated sentences: punctuation splitting and wordpiece. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. empty_token (`str`, *optional*, defaults to `"[EMPTY]"`): The token used for empty cell values in a table. Empty cell values include "", "n/a", "nan" and "?". tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). cell_trim_length (`int`, *optional*, defaults to -1): If > 0: Trim cells so that the length is <= this value. Also disables further cell trimming, should thus be used with `truncation` set to `True`. max_column_id (`int`, *optional*): Max column id to extract. max_row_id (`int`, *optional*): Max row id to extract. strip_column_names (`bool`, *optional*, defaults to `False`): Whether to add empty strings instead of column names. update_answer_coordinates (`bool`, *optional*, defaults to `False`): Whether to recompute the answer coordinates from the answer text. min_question_length (`int`, *optional*): Minimum length of each question in terms of tokens (will be skipped otherwise). max_question_length (`int`, *optional*): Maximum length of each question in terms of tokens (will be skipped otherwise). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", empty_token="[EMPTY]", tokenize_chinese_chars=True, strip_accents=None, cell_trim_length: int = -1, max_column_id: int = None, max_row_id: int = None, strip_column_names: bool = False, update_answer_coordinates: bool = False, min_question_length=None, max_question_length=None, model_max_length: int = 512, additional_special_tokens: Optional[List[str]] = None, **kwargs, ): if not is_pandas_available(): raise ImportError("Pandas is required for the TAPAS tokenizer.") if additional_special_tokens is not None: if empty_token not in additional_special_tokens: additional_special_tokens.append(empty_token) else: additional_special_tokens = [empty_token] if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) # Additional properties self.cell_trim_length = cell_trim_length self.max_column_id = ( max_column_id if max_column_id is not None else model_max_length if model_max_length is not None else VERY_LARGE_INTEGER ) self.max_row_id = ( max_row_id if max_row_id is not None else model_max_length if model_max_length is not None else VERY_LARGE_INTEGER ) self.strip_column_names = strip_column_names self.update_answer_coordinates = update_answer_coordinates self.min_question_length = min_question_length self.max_question_length = max_question_length super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, empty_token=empty_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, cell_trim_length=cell_trim_length, max_column_id=max_column_id, max_row_id=max_row_id, strip_column_names=strip_column_names, update_answer_coordinates=update_answer_coordinates, min_question_length=min_question_length, max_question_length=max_question_length, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): if format_text(text) == EMPTY_TEXT: return [self.additional_special_tokens[0]] split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) 
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def create_attention_mask_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:
        """
        Creates the attention mask according to the query token IDs and a list of table values.

        Args:
            query_ids (`List[int]`): list of token IDs corresponding to the query.
            table_values (`List[TableValue]`): list of table values, which are named tuples containing the token
                value, the column ID and the row ID of said token.

        Returns:
            `List[int]`: List of ints containing the attention mask values.
        """
        return [1] * (1 + len(query_ids) + 1 + len(table_values))

    def create_segment_token_type_ids_from_sequences(
        self, query_ids: List[int], table_values: List[TableValue]
    ) -> List[int]:
        """
        Creates the segment token type IDs according to the query token IDs and a list of table values.

        Args:
            query_ids (`List[int]`): list of token IDs corresponding to the query.
            table_values (`List[TableValue]`): list of table values, which are named tuples containing the token
                value, the column ID and the row ID of said token.

        Returns:
            `List[int]`: List of ints containing the segment token type IDs values.
        """
        table_ids = list(zip(*table_values))[0] if table_values else []
        return [0] * (1 + len(query_ids) + 1) + [1] * len(table_ids)

    def create_column_token_type_ids_from_sequences(
        self, query_ids: List[int], table_values: List[TableValue]
    ) -> List[int]:
        """
        Creates the column token type IDs according to the query token IDs and a list of table values.

        Args:
            query_ids (`List[int]`): list of token IDs corresponding to the query.
            table_values (`List[TableValue]`): list of table values, which are named tuples containing the token
                value, the column ID and the row ID of said token.

        Returns:
            `List[int]`: List of ints containing the column token type IDs values.
        """
        table_column_ids = list(zip(*table_values))[1] if table_values else []
        return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)

    def create_row_token_type_ids_from_sequences(
        self, query_ids: List[int], table_values: List[TableValue]
    ) -> List[int]:
        """
        Creates the row token type IDs according to the query token IDs and a list of table values.

        Args:
            query_ids (`List[int]`): list of token IDs corresponding to the query.
            table_values (`List[TableValue]`): list of table values, which are named tuples containing the token
                value, the column ID and the row ID of said token.

        Returns:
            `List[int]`: List of ints containing the row token type IDs values.
        """
        table_row_ids = list(zip(*table_values))[2] if table_values else []
        return [0] * (1 + len(query_ids) + 1) + list(table_row_ids)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a question and flattened table for question answering or sequence classification
        tasks by concatenating and adding special tokens.

        Args:
            token_ids_0 (`List[int]`): The ids of the question.
            token_ids_1 (`List[int]`, *optional*): The ids of the flattened table.

        Returns:
            `List[int]`: The model input with special tokens.
        """
        if token_ids_1 is None:
            raise ValueError("With TAPAS, you must provide both question IDs and table IDs.")

        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`): List of question IDs.
            token_ids_1 (`List[int]`, *optional*): List of flattened table IDs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0)) + [1]

    @add_end_docstrings(TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        table: "pd.DataFrame",
        queries: Optional[
            Union[
                TextInput,
                PreTokenizedInput,
                EncodedInput,
                List[TextInput],
                List[PreTokenizedInput],
                List[EncodedInput],
            ]
        ] = None,
        answer_coordinates: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
        answer_text: Optional[Union[List[TextInput], List[List[TextInput]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TapasTruncationStrategy] = False,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) related to a table.

        Args:
            table (`pd.DataFrame`):
                Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
                dataframe to convert it to string.
            queries (`str` or `List[str]`):
                Question or batch of questions related to a table to be encoded. Note that in case of a batch, all
                questions must refer to the **same** table.
            answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
                Answer coordinates of each table-question pair in the batch. In case only a single table-question pair
                is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must
                be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The
                first column has index 0. In case a batch of table-question pairs is provided, then the
                answer_coordinates must be a list of lists of tuples (each list corresponding to a single
                table-question pair).
            answer_text (`List[str]` or `List[List[str]]`, *optional*):
                Answer text of each table-question pair in the batch. In case only a single table-question pair is
                provided, then the answer_text must be a single list of one or more strings. Each string must be the
                answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided,
                then the answer_text must be a list of lists of strings (each list corresponding to a single
                table-question pair).
        """
        assert isinstance(table, pd.DataFrame), "Table must be of type pd.DataFrame"

        # Input type checking for clearer error
        valid_query = False

        # Check that query has a valid type
        if queries is None or isinstance(queries, str):
            valid_query = True
        elif isinstance(queries, (list, tuple)):
            if len(queries) == 0 or isinstance(queries[0], str):
                valid_query = True

        if not valid_query:
            raise ValueError(
                "queries input must be of type `str` (single example), `List[str]` (batch or single pretokenized"
                " example). "
            )
        is_batched = isinstance(queries, (list, tuple))

        if is_batched:
            return self.batch_encode_plus(
                table=table,
                queries=queries,
                answer_coordinates=answer_coordinates,
                answer_text=answer_text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                table=table,
                query=queries,
                answer_coordinates=answer_coordinates,
                answer_text=answer_text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        table: "pd.DataFrame",
        queries: Optional[
            Union[
                List[TextInput],
                List[PreTokenizedInput],
                List[EncodedInput],
            ]
        ] = None,
        answer_coordinates: Optional[List[List[Tuple]]] = None,
        answer_text: Optional[List[List[TextInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TapasTruncationStrategy] = False,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare a table and a list of strings for the model.

        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.

        </Tip>

        Args:
            table (`pd.DataFrame`):
                Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
                dataframe to convert it to string.
            queries (`List[str]`):
                Batch of questions related to a table to be encoded. Note that all questions must refer to the
                **same** table.
            answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
                Answer coordinates of each table-question pair in the batch. Each tuple must be a (row_index,
                column_index) pair. The first data row (not the column header row) has index 0. The first column has
                index 0. The answer_coordinates must be a list of lists of tuples (each list corresponding to a
                single table-question pair).
            answer_text (`List[str]` or `List[List[str]]`, *optional*):
                Answer text of each table-question pair in the batch. In case a batch of table-question pairs is
                provided, then the answer_text must be a list of lists of strings (each list corresponding to a
                single table-question pair). Each string must be the answer text of a corresponding answer
                coordinate.
        """
        if return_token_type_ids is not None and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )

        if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text):
            raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided")
        elif answer_coordinates is None and answer_text is None:
            answer_coordinates = answer_text = [None] * len(queries)

        if "is_split_into_words" in kwargs:
            raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.")

        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
) return self._batch_encode_plus( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _get_question_tokens(self, query): """Tokenizes the query, taking into account the max and min question length.""" query_tokens = self.tokenize(query) if self.max_question_length is not None and len(query_tokens) > self.max_question_length: logger.warning("Skipping query as its tokens are longer than the max question length") return "", [] if self.min_question_length is not None and len(query_tokens) < self.min_question_length: logger.warning("Skipping query as its tokens are shorter than the min question length") return "", [] return query, query_tokens def _batch_encode_plus( self, table, queries: Union[ List[TextInput], List[PreTokenizedInput], List[EncodedInput], ], answer_coordinates: Optional[List[List[Tuple]]] = None, answer_text: Optional[List[List[TextInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: table_tokens = self._tokenize_table(table) queries_tokens = [] for idx, query in enumerate(queries): query, query_tokens = self._get_question_tokens(query) queries[idx] = query queries_tokens.append(query_tokens) batch_outputs = self._batch_prepare_for_model( table, queries, tokenized_table=table_tokens, queries_tokens=queries_tokens, answer_coordinates=answer_coordinates, padding=padding, truncation=truncation, answer_text=answer_text, add_special_tokens=add_special_tokens, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) return BatchEncoding(batch_outputs) def _batch_prepare_for_model( self, raw_table: "pd.DataFrame", raw_queries: Union[ List[TextInput], List[PreTokenizedInput], List[EncodedInput], ], tokenized_table: Optional[TokenizedTable] = None, queries_tokens: Optional[List[List[str]]] = None, answer_coordinates: Optional[List[List[Tuple]]] = None, answer_text: Optional[List[List[TextInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, 
return_attention_mask: Optional[bool] = True, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: batch_outputs = {} for index, example in enumerate(zip(raw_queries, queries_tokens, answer_coordinates, answer_text)): raw_query, query_tokens, answer_coords, answer_txt = example outputs = self.prepare_for_model( raw_table, raw_query, tokenized_table=tokenized_table, query_tokens=query_tokens, answer_coordinates=answer_coords, answer_text=answer_txt, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards truncation=truncation, max_length=max_length, pad_to_multiple_of=None, # we pad in batch afterwards return_attention_mask=False, # we pad in batch afterwards return_token_type_ids=return_token_type_ids, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, prev_answer_coordinates=answer_coordinates[index - 1] if index != 0 else None, prev_answer_text=answer_text[index - 1] if index != 0 else None, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode( self, table: "pd.DataFrame", query: Optional[ Union[ TextInput, PreTokenizedInput, EncodedInput, ] ] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> List[int]: """ Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc. which are necessary for the model to work correctly. Use that method if you want to build your processing on your own, otherwise refer to `__call__`. Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. query (`str` or `List[str]`): Question related to a table to be encoded. 
""" encoded_inputs = self.encode_plus( table, query=query, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, table: "pd.DataFrame", query: Optional[ Union[ TextInput, PreTokenizedInput, EncodedInput, ] ] = None, answer_coordinates: Optional[List[Tuple]] = None, answer_text: Optional[List[TextInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Prepare a table and a string for the model. Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. query (`str` or `List[str]`): Question related to a table to be encoded. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*): Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. answer_text (`List[str]` or `List[List[str]]`, *optional*): Answer text of each table-question pair in the batch. The answer_text must be a single list of one or more strings. Each string must be the answer text of a corresponding answer coordinate. """ if return_token_type_ids is not None and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text): raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided") if "is_split_into_words" in kwargs: raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.") if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) return self._encode_plus( table=table, query=query, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, table: "pd.DataFrame", query: Union[ TextInput, PreTokenizedInput, EncodedInput, ], answer_coordinates: Optional[List[Tuple]] = None, answer_text: Optional[List[TextInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, return_attention_mask: Optional[bool] = True, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ): if query is None: query = "" logger.warning( "TAPAS is a question answering model but you have not passed a query. Please be aware that the " "model will probably not behave correctly." ) table_tokens = self._tokenize_table(table) query, query_tokens = self._get_question_tokens(query) return self.prepare_for_model( table, query, tokenized_table=table_tokens, query_tokens=query_tokens, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, raw_table: "pd.DataFrame", raw_query: Union[ TextInput, PreTokenizedInput, EncodedInput, ], tokenized_table: Optional[TokenizedTable] = None, query_tokens: Optional[TokenizedTable] = None, answer_coordinates: Optional[List[Tuple]] = None, answer_text: Optional[List[TextInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TapasTruncationStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = True, return_attention_mask: Optional[bool] = True, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence of input id so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens. Args: raw_table (`pd.DataFrame`): The original table before any transformation (like tokenization) was applied to it. raw_query (`TextInput` or `PreTokenizedInput` or `EncodedInput`): The original query before any transformation (like tokenization) was applied to it. 
tokenized_table (`TokenizedTable`): The table after tokenization. query_tokens (`List[str]`): The query after tokenization. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*): Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The first column has index 0. answer_text (`List[str]` or `List[List[str]]`, *optional*): Answer text of each table-question pair in the batch. The answer_text must be a single list of one or more strings. Each string must be the answer text of a corresponding answer coordinate. """ if isinstance(padding, bool): if padding and (max_length is not None or pad_to_multiple_of is not None): padding = PaddingStrategy.MAX_LENGTH else: padding = PaddingStrategy.DO_NOT_PAD elif not isinstance(padding, PaddingStrategy): padding = PaddingStrategy(padding) if isinstance(truncation, bool): if truncation: truncation = TapasTruncationStrategy.DROP_ROWS_TO_FIT else: truncation = TapasTruncationStrategy.DO_NOT_TRUNCATE elif not isinstance(truncation, TapasTruncationStrategy): truncation = TapasTruncationStrategy(truncation) encoded_inputs = {} is_part_of_batch = False prev_answer_coordinates, prev_answer_text = None, None if "prev_answer_coordinates" in kwargs and "prev_answer_text" in kwargs: is_part_of_batch = True prev_answer_coordinates = kwargs["prev_answer_coordinates"] prev_answer_text = kwargs["prev_answer_text"] num_rows = self._get_num_rows(raw_table, truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE) num_columns = self._get_num_columns(raw_table) _, _, num_tokens = self._get_table_boundaries(tokenized_table) if truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE: num_rows, num_tokens = self._get_truncated_table_rows( query_tokens, tokenized_table, num_rows, num_columns, max_length, truncation_strategy=truncation ) table_data = list(self._get_table_values(tokenized_table, num_columns, num_rows, num_tokens)) query_ids = self.convert_tokens_to_ids(query_tokens) table_ids = list(zip(*table_data))[0] if len(table_data) > 0 else list(zip(*table_data)) table_ids = self.convert_tokens_to_ids(list(table_ids)) if "return_overflowing_tokens" in kwargs and kwargs["return_overflowing_tokens"]: raise ValueError("TAPAS does not return overflowing tokens as it works on tables.") if add_special_tokens: input_ids = self.build_inputs_with_special_tokens(query_ids, table_ids) else: input_ids = query_ids + table_ids if max_length is not None and len(input_ids) > max_length: raise ValueError( "Could not encode the query and table header given the maximum length. 
Encoding the query and table " f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}" ) encoded_inputs["input_ids"] = input_ids segment_ids = self.create_segment_token_type_ids_from_sequences(query_ids, table_data) column_ids = self.create_column_token_type_ids_from_sequences(query_ids, table_data) row_ids = self.create_row_token_type_ids_from_sequences(query_ids, table_data) if not is_part_of_batch or (prev_answer_coordinates is None and prev_answer_text is None): # simply set the prev_labels to zeros prev_labels = [0] * len(row_ids) else: prev_labels = self.get_answer_ids( column_ids, row_ids, table_data, prev_answer_text, prev_answer_coordinates ) # FIRST: parse both the table and question in terms of numeric values raw_table = add_numeric_table_values(raw_table) raw_query = add_numeric_values_to_question(raw_query) # SECOND: add numeric-related features (and not parse them in these functions): column_ranks, inv_column_ranks = self._get_numeric_column_ranks(column_ids, row_ids, raw_table) numeric_relations = self._get_numeric_relations(raw_query, column_ids, row_ids, raw_table) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_attention_mask: attention_mask = self.create_attention_mask_from_sequences(query_ids, table_data) encoded_inputs["attention_mask"] = attention_mask if answer_coordinates is not None and answer_text is not None: labels = self.get_answer_ids(column_ids, row_ids, table_data, answer_text, answer_coordinates) numeric_values = self._get_numeric_values(raw_table, column_ids, row_ids) numeric_values_scale = self._get_numeric_values_scale(raw_table, column_ids, row_ids) encoded_inputs["labels"] = labels encoded_inputs["numeric_values"] = numeric_values encoded_inputs["numeric_values_scale"] = numeric_values_scale if return_token_type_ids: token_type_ids = [ segment_ids, column_ids, row_ids, prev_labels, column_ranks, inv_column_ranks, numeric_relations, ] token_type_ids = [list(ids) for ids in list(zip(*token_type_ids))] encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(query_ids, table_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(input_ids) # Check lengths if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose: if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False): logger.warning( "Token indices sequence length is longer than the specified maximum sequence length " f"for this model ({len(encoded_inputs['input_ids'])} > {self.model_max_length}). Running this " "sequence through the model will result in indexing errors." 
) self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True # Padding if padding != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def _get_truncated_table_rows( self, query_tokens: List[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy], ) -> Tuple[int, int]: """ Truncates a sequence pair in-place following the strategy. Args: query_tokens (`List[str]`): List of strings corresponding to the tokenized query. tokenized_table (`TokenizedTable`): Tokenized table num_rows (`int`): Total number of table rows num_columns (`int`): Total number of table columns max_length (`int`): Total maximum length. truncation_strategy (`str` or [`TapasTruncationStrategy`]): Truncation strategy to use. Seeing as this method should only be called when truncating, the only available strategy is the `"drop_rows_to_fit"` strategy. Returns: `Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available for each table element. """ if not isinstance(truncation_strategy, TapasTruncationStrategy): truncation_strategy = TapasTruncationStrategy(truncation_strategy) if max_length is None: max_length = self.model_max_length if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT: while True: num_tokens = self._get_max_num_tokens( query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length ) if num_tokens is not None: # We could fit the table. break # Try to drop a row to fit the table. num_rows -= 1 if num_rows < 1: break elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE: raise ValueError(f"Unknown truncation strategy {truncation_strategy}.") return num_rows, num_tokens or 1 def _tokenize_table( self, table=None, ): """ Tokenizes column headers and cell texts of a table. Args: table (`pd.Dataframe`): Table. Returns: `TokenizedTable`: TokenizedTable object. """ tokenized_rows = [] tokenized_row = [] # tokenize column headers for column in table: if self.strip_column_names: tokenized_row.append(self.tokenize("")) else: tokenized_row.append(self.tokenize(column)) tokenized_rows.append(tokenized_row) # tokenize cell values for idx, row in table.iterrows(): tokenized_row = [] for cell in row: tokenized_row.append(self.tokenize(cell)) tokenized_rows.append(tokenized_row) token_coordinates = [] for row_index, row in enumerate(tokenized_rows): for column_index, cell in enumerate(row): for token_index, _ in enumerate(cell): token_coordinates.append( TokenCoordinates( row_index=row_index, column_index=column_index, token_index=token_index, ) ) return TokenizedTable( rows=tokenized_rows, selected_tokens=token_coordinates, ) def _question_encoding_cost(self, question_tokens): # Two extra spots of SEP and CLS. return len(question_tokens) + 2 def _get_token_budget(self, question_tokens, max_length=None): """ Computes the number of tokens left for the table after tokenizing a question, taking into account the max sequence length of the model. Args: question_tokens (`List[String]`): List of question tokens. 
Returns: `int`: the number of tokens left for the table, given the model max length. """ return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost( question_tokens ) def _get_table_values(self, table, num_columns, num_rows, num_tokens) -> Generator[TableValue, None, None]: """Iterates over partial table and returns token, column and row indexes.""" for tc in table.selected_tokens: # First row is header row. if tc.row_index >= num_rows + 1: continue if tc.column_index >= num_columns: continue cell = table.rows[tc.row_index][tc.column_index] token = cell[tc.token_index] word_begin_index = tc.token_index # Don't add partial words. Find the starting word piece and check if it # fits in the token budget. while word_begin_index >= 0 and _is_inner_wordpiece(cell[word_begin_index]): word_begin_index -= 1 if word_begin_index >= num_tokens: continue yield TableValue(token, tc.column_index + 1, tc.row_index) def _get_table_boundaries(self, table): """Return maximal number of rows, columns and tokens.""" max_num_tokens = 0 max_num_columns = 0 max_num_rows = 0 for tc in table.selected_tokens: max_num_columns = max(max_num_columns, tc.column_index + 1) max_num_rows = max(max_num_rows, tc.row_index + 1) max_num_tokens = max(max_num_tokens, tc.token_index + 1) max_num_columns = min(self.max_column_id, max_num_columns) max_num_rows = min(self.max_row_id, max_num_rows) return max_num_rows, max_num_columns, max_num_tokens def _get_table_cost(self, table, num_columns, num_rows, num_tokens): return sum(1 for _ in self._get_table_values(table, num_columns, num_rows, num_tokens)) def _get_max_num_tokens(self, question_tokens, tokenized_table, num_columns, num_rows, max_length): """Computes max number of tokens that can be squeezed into the budget.""" token_budget = self._get_token_budget(question_tokens, max_length) _, _, max_num_tokens = self._get_table_boundaries(tokenized_table) if self.cell_trim_length >= 0 and max_num_tokens > self.cell_trim_length: max_num_tokens = self.cell_trim_length num_tokens = 0 for num_tokens in range(max_num_tokens + 1): cost = self._get_table_cost(tokenized_table, num_columns, num_rows, num_tokens + 1) if cost > token_budget: break if num_tokens < max_num_tokens: if self.cell_trim_length >= 0: # We don't allow dynamic trimming if a cell_trim_length is set. 
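                # Returning None tells _get_truncated_table_rows that the table does not fit with the
                # current number of rows, so it drops another row and tries again.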
return None if num_tokens == 0: return None return num_tokens def _get_num_columns(self, table): num_columns = table.shape[1] if num_columns >= self.max_column_id: raise ValueError("Too many columns") return num_columns def _get_num_rows(self, table, drop_rows_to_fit): num_rows = table.shape[0] if num_rows >= self.max_row_id: if drop_rows_to_fit: num_rows = self.max_row_id - 1 else: raise ValueError("Too many rows") return num_rows def _serialize_text(self, question_tokens): """Serializes texts in index arrays.""" tokens = [] segment_ids = [] column_ids = [] row_ids = [] # add [CLS] token at the beginning tokens.append(self.cls_token) segment_ids.append(0) column_ids.append(0) row_ids.append(0) for token in question_tokens: tokens.append(token) segment_ids.append(0) column_ids.append(0) row_ids.append(0) return tokens, segment_ids, column_ids, row_ids def _serialize( self, question_tokens, table, num_columns, num_rows, num_tokens, ): """Serializes table and text.""" tokens, segment_ids, column_ids, row_ids = self._serialize_text(question_tokens) # add [SEP] token between question and table tokens tokens.append(self.sep_token) segment_ids.append(0) column_ids.append(0) row_ids.append(0) for token, column_id, row_id in self._get_table_values(table, num_columns, num_rows, num_tokens): tokens.append(token) segment_ids.append(1) column_ids.append(column_id) row_ids.append(row_id) return SerializedExample( tokens=tokens, segment_ids=segment_ids, column_ids=column_ids, row_ids=row_ids, ) def _get_column_values(self, table, col_index): table_numeric_values = {} for row_index, row in table.iterrows(): cell = row[col_index] if cell.numeric_value is not None: table_numeric_values[row_index] = cell.numeric_value return table_numeric_values def _get_cell_token_indexes(self, column_ids, row_ids, column_id, row_id): for index in range(len(column_ids)): if column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id: yield index def _get_numeric_column_ranks(self, column_ids, row_ids, table): """Returns column ranks for all numeric columns.""" ranks = [0] * len(column_ids) inv_ranks = [0] * len(column_ids) # original code from tf_example_utils.py of the original implementation if table is not None: for col_index in range(len(table.columns)): table_numeric_values = self._get_column_values(table, col_index) if not table_numeric_values: continue try: key_fn = get_numeric_sort_key_fn(table_numeric_values.values()) except ValueError: continue table_numeric_values = {row_index: key_fn(value) for row_index, value in table_numeric_values.items()} table_numeric_values_inv = collections.defaultdict(list) for row_index, value in table_numeric_values.items(): table_numeric_values_inv[value].append(row_index) unique_values = sorted(table_numeric_values_inv.keys()) for rank, value in enumerate(unique_values): for row_index in table_numeric_values_inv[value]: for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index): ranks[index] = rank + 1 inv_ranks[index] = len(unique_values) - rank return ranks, inv_ranks def _get_numeric_sort_key_fn(self, table_numeric_values, value): """ Returns the sort key function for comparing value to table values. The function returned will be a suitable input for the key param of the sort(). See number_annotation_utils._get_numeric_sort_key_fn for details Args: table_numeric_values: Numeric values of a column value: Numeric value in the question Returns: A function key function to compare column and question values. 
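
        Example (illustrative): if the column cells parse to the dates "05/05/2010" and "August 2007",
        every value, including the question value, is mapped to the fields they all have in common (at
        most year and month here), so column and question values can be compared consistently.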
""" if not table_numeric_values: return None all_values = list(table_numeric_values.values()) all_values.append(value) try: return get_numeric_sort_key_fn(all_values) except ValueError: return None def _get_numeric_relations(self, question, column_ids, row_ids, table): """ Returns numeric relations embeddings Args: question: Question object. column_ids: Maps word piece position to column id. row_ids: Maps word piece position to row id. table: The table containing the numeric cell values. """ numeric_relations = [0] * len(column_ids) # first, we add any numeric value spans to the question: # Create a dictionary that maps a table cell to the set of all relations # this cell has with any value in the question. cell_indices_to_relations = collections.defaultdict(set) if question is not None and table is not None: for numeric_value_span in question.numeric_spans: for value in numeric_value_span.values: for column_index in range(len(table.columns)): table_numeric_values = self._get_column_values(table, column_index) sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values, value) if sort_key_fn is None: continue for row_index, cell_value in table_numeric_values.items(): relation = get_numeric_relation(value, cell_value, sort_key_fn) if relation is not None: cell_indices_to_relations[column_index, row_index].add(relation) # For each cell add a special feature for all its word pieces. for (column_index, row_index), relations in cell_indices_to_relations.items(): relation_set_index = 0 for relation in relations: assert relation.value >= Relation.EQ.value relation_set_index += 2 ** (relation.value - Relation.EQ.value) for cell_token_index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index): numeric_relations[cell_token_index] = relation_set_index return numeric_relations def _get_numeric_values(self, table, column_ids, row_ids): """Returns numeric values for computation of answer loss.""" numeric_values = [float("nan")] * len(column_ids) if table is not None: num_rows = table.shape[0] num_columns = table.shape[1] for col_index in range(num_columns): for row_index in range(num_rows): numeric_value = table.iloc[row_index, col_index].numeric_value if numeric_value is not None: if numeric_value.float_value is None: continue float_value = numeric_value.float_value if float_value == float("inf"): continue for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index): numeric_values[index] = float_value return numeric_values def _get_numeric_values_scale(self, table, column_ids, row_ids): """Returns a scale to each token to down weigh the value of long words.""" numeric_values_scale = [1.0] * len(column_ids) if table is None: return numeric_values_scale num_rows = table.shape[0] num_columns = table.shape[1] for col_index in range(num_columns): for row_index in range(num_rows): indices = list(self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index)) num_indices = len(indices) if num_indices > 1: for index in indices: numeric_values_scale[index] = float(num_indices) return numeric_values_scale def _pad_to_seq_length(self, inputs): while len(inputs) > self.model_max_length: inputs.pop() while len(inputs) < self.model_max_length: inputs.append(0) def _get_all_answer_ids_from_coordinates( self, column_ids, row_ids, answers_list, ): """Maps lists of answer coordinates to token indexes.""" answer_ids = [0] * len(column_ids) found_answers = set() all_answers = set() for answers in answers_list: column_index, row_index = answers 
all_answers.add((column_index, row_index)) for index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index): found_answers.add((column_index, row_index)) answer_ids[index] = 1 missing_count = len(all_answers) - len(found_answers) return answer_ids, missing_count def _get_all_answer_ids(self, column_ids, row_ids, answer_coordinates): """ Maps answer coordinates of a question to token indexes. In the SQA format (TSV), the coordinates are given as (row, column) tuples. Here, we first swap them to (column, row) format before calling _get_all_answer_ids_from_coordinates. """ def _to_coordinates(answer_coordinates_question): return [(coords[1], coords[0]) for coords in answer_coordinates_question] return self._get_all_answer_ids_from_coordinates( column_ids, row_ids, answers_list=(_to_coordinates(answer_coordinates)) ) def _find_tokens(self, text, segment): """Return start index of segment in text or None.""" logging.info(f"text: {text} {segment}") for index in range(1 + len(text) - len(segment)): for seg_index, seg_token in enumerate(segment): if text[index + seg_index].piece != seg_token.piece: break else: return index return None def _find_answer_coordinates_from_answer_text( self, tokenized_table, answer_text, ): """Returns all occurrences of answer_text in the table.""" logging.info(f"answer text: {answer_text}") for row_index, row in enumerate(tokenized_table.rows): if row_index == 0: # We don't search for answers in the header. continue for col_index, cell in enumerate(row): token_index = self._find_tokens(cell, answer_text) if token_index is not None: yield TokenCoordinates( row_index=row_index, column_index=col_index, token_index=token_index, ) def _find_answer_ids_from_answer_texts( self, column_ids, row_ids, tokenized_table, answer_texts, ): """Maps question with answer texts to the first matching token indexes.""" answer_ids = [0] * len(column_ids) for answer_text in answer_texts: for coordinates in self._find_answer_coordinates_from_answer_text( tokenized_table, answer_text, ): # Maps answer coordinates to indexes this can fail if tokens / rows have # been pruned. 
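                # coordinates.row_index is 1-based with respect to the table body (row 0 of the
                # tokenized table is the header row), hence the `- 1` when mapping to row ids below.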
indexes = list( self._get_cell_token_indexes( column_ids, row_ids, column_id=coordinates.column_index, row_id=coordinates.row_index - 1, ) ) indexes.sort() coordinate_answer_ids = [] if indexes: begin_index = coordinates.token_index + indexes[0] end_index = begin_index + len(answer_text) for index in indexes: if index >= begin_index and index < end_index: coordinate_answer_ids.append(index) if len(coordinate_answer_ids) == len(answer_text): for index in coordinate_answer_ids: answer_ids[index] = 1 break return answer_ids def _get_answer_ids(self, column_ids, row_ids, answer_coordinates): """Maps answer coordinates of a question to token indexes.""" answer_ids, missing_count = self._get_all_answer_ids(column_ids, row_ids, answer_coordinates) if missing_count: raise ValueError("Couldn't find all answers") return answer_ids def get_answer_ids(self, column_ids, row_ids, tokenized_table, answer_texts_question, answer_coordinates_question): if self.update_answer_coordinates: return self._find_answer_ids_from_answer_texts( column_ids, row_ids, tokenized_table, answer_texts=[self.tokenize(at) for at in answer_texts_question], ) return self._get_answer_ids(column_ids, row_ids, answer_coordinates_question) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if padding_strategy == PaddingStrategy.LONGEST: max_length = len(encoded_inputs["input_ids"]) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = ( padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length ) # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) if needs_to_be_padded: difference = max_length - len(encoded_inputs["input_ids"]) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [[self.pad_token_type_id] * 7] * difference ) if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [0] * difference if "numeric_values" in encoded_inputs: encoded_inputs["numeric_values"] = encoded_inputs["numeric_values"] + [float("nan")] * difference if "numeric_values_scale" in encoded_inputs: encoded_inputs["numeric_values_scale"] = ( encoded_inputs["numeric_values_scale"] + [1.0] * difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [[self.pad_token_type_id] * 7] * difference + encoded_inputs[ "token_type_ids" ] if "labels" in encoded_inputs: encoded_inputs["labels"] = [0] * difference + encoded_inputs["labels"] if "numeric_values" in encoded_inputs: encoded_inputs["numeric_values"] = [float("nan")] * difference + encoded_inputs["numeric_values"] if "numeric_values_scale" in encoded_inputs: encoded_inputs["numeric_values_scale"] = [1.0] * difference + encoded_inputs[ "numeric_values_scale" ] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs # Everything related to converting logits to predictions def _get_cell_token_probs(self, probabilities, segment_ids, row_ids, column_ids): for i, p in enumerate(probabilities): segment_id = segment_ids[i] col = column_ids[i] - 1 row = row_ids[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: yield i, p def _get_mean_cell_probs(self, probabilities, segment_ids, row_ids, column_ids): """Computes average probability per cell, aggregating over tokens.""" coords_to_probs = collections.defaultdict(list) for i, prob in self._get_cell_token_probs(probabilities, segment_ids, row_ids, column_ids): col = column_ids[i] - 1 row = row_ids[i] - 1 coords_to_probs[(col, row)].append(prob) return {coords: np.array(cell_probs).mean() for coords, cell_probs in coords_to_probs.items()} def convert_logits_to_predictions(self, data, logits, logits_agg=None, cell_classification_threshold=0.5): """ Converts logits of [`TapasForQuestionAnswering`] to actual predicted answer coordinates and optional aggregation indices. The original implementation, on which this function is based, can be found [here](https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288). Args: data (`dict`): Dictionary mapping features to actual values. Should be created using [`TapasTokenizer`]. 
logits (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits at the token level. logits_agg (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, num_aggregation_labels)`, *optional*): Tensor containing the aggregation logits. cell_classification_threshold (`float`, *optional*, defaults to 0.5): Threshold to be used for cell selection. All table cells for which their probability is larger than this threshold will be selected. Returns: `tuple` comprising various elements depending on the inputs: - predicted_answer_coordinates (`List[List[[tuple]]` of length `batch_size`): Predicted answer coordinates as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index). - predicted_aggregation_indices (`List[int]`of length `batch_size`, *optional*, returned when `logits_aggregation` is provided): Predicted aggregation operator indices of the aggregation head. """ # converting to numpy arrays to work with PT/TF logits = logits.numpy() if logits_agg is not None: logits_agg = logits_agg.numpy() data = {key: value.numpy() for key, value in data.items() if key != "training"} # input data is of type float32 # np.log(np.finfo(np.float32).max) = 88.72284 # Any value over 88.72284 will overflow when passed through the exponential, sending a warning # We disable this warning by truncating the logits. logits[logits < -88.7] = -88.7 # Compute probabilities from token logits probabilities = 1 / (1 + np.exp(-logits)) * data["attention_mask"] token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] # collect input_ids, segment ids, row ids and column ids of batch. Shape (batch_size, seq_len) input_ids = data["input_ids"] segment_ids = data["token_type_ids"][:, :, token_types.index("segment_ids")] row_ids = data["token_type_ids"][:, :, token_types.index("row_ids")] column_ids = data["token_type_ids"][:, :, token_types.index("column_ids")] # next, get answer coordinates for every example in the batch num_batch = input_ids.shape[0] predicted_answer_coordinates = [] for i in range(num_batch): probabilities_example = probabilities[i].tolist() segment_ids_example = segment_ids[i] row_ids_example = row_ids[i] column_ids_example = column_ids[i] max_width = column_ids_example.max() max_height = row_ids_example.max() if max_width == 0 and max_height == 0: continue cell_coords_to_prob = self._get_mean_cell_probs( probabilities_example, segment_ids_example.tolist(), row_ids_example.tolist(), column_ids_example.tolist(), ) # Select the answers above the classification threshold. 
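            # A cell is kept when the mean probability of its tokens exceeds cell_classification_threshold;
            # its coordinates are reported as a 0-based (row index, column index) tuple.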
answer_coordinates = [] for col in range(max_width): for row in range(max_height): cell_prob = cell_coords_to_prob.get((col, row), None) if cell_prob is not None: if cell_prob > cell_classification_threshold: answer_coordinates.append((row, col)) answer_coordinates = sorted(answer_coordinates) predicted_answer_coordinates.append(answer_coordinates) output = (predicted_answer_coordinates,) if logits_agg is not None: predicted_aggregation_indices = logits_agg.argmax(axis=-1) output = (predicted_answer_coordinates, predicted_aggregation_indices.tolist()) return output # End of everything related to converting logits to predictions # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
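        # _tokenize_chinese_chars adds whitespace around every CJK codepoint, so each such character
        # ends up as its own token after the whitespace tokenization below.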
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. 
Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # Below: utilities for TAPAS tokenizer (independent from PyTorch/Tensorflow). # This includes functions to parse numeric values (dates and numbers) from both the table and questions in order # to create the column_ranks, inv_column_ranks, numeric_values, numeric values_scale and numeric_relations in # prepare_for_model of TapasTokenizer. # These are meant to be used in an academic setup, for production use cases Gold mine or Aqua should be used. # taken from constants.py of the original implementation # URL: https://github.com/google-research/tapas/blob/master/tapas/utils/constants.py class Relation(enum.Enum): HEADER_TO_CELL = 1 # Connects header to cell. CELL_TO_HEADER = 2 # Connects cell to header. QUERY_TO_HEADER = 3 # Connects query to headers. QUERY_TO_CELL = 4 # Connects query to cells. ROW_TO_CELL = 5 # Connects row to cells. CELL_TO_ROW = 6 # Connects cells to row. EQ = 7 # Annotation value is same as cell value LT = 8 # Annotation value is less than cell value GT = 9 # Annotation value is greater than cell value @dataclass class Date: year: Optional[int] = None month: Optional[int] = None day: Optional[int] = None @dataclass class NumericValue: float_value: Optional[float] = None date: Optional[Date] = None @dataclass class NumericValueSpan: begin_index: int = None end_index: int = None values: List[NumericValue] = None @dataclass class Cell: text: Text numeric_value: Optional[NumericValue] = None @dataclass class Question: original_text: Text # The original raw question string. text: Text # The question string after normalization. numeric_spans: Optional[List[NumericValueSpan]] = None # Below: all functions from number_utils.py as well as 2 functions (namely get_all_spans and normalize_for_match) # from text_utils.py of the original implementation. URL's: # - https://github.com/google-research/tapas/blob/master/tapas/utils/number_utils.py # - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py # Constants for parsing date expressions. # Masks that specify (by a bool) which of (year, month, day) will be populated. _DateMask = collections.namedtuple("_DateMask", ["year", "month", "day"]) _YEAR = _DateMask(True, False, False) _YEAR_MONTH = _DateMask(True, True, False) _YEAR_MONTH_DAY = _DateMask(True, True, True) _MONTH = _DateMask(False, True, False) _MONTH_DAY = _DateMask(False, True, True) # Pairs of patterns to pass to 'datetime.strptime' and masks specifying which # fields will be set by the corresponding pattern. 
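# For example, the pattern "%B %Y" paired with the _YEAR_MONTH mask matches a string such as
# "August 2007" and populates only the year and month fields of the resulting Date object.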
_DATE_PATTERNS = ( ("%B", _MONTH), ("%Y", _YEAR), ("%Ys", _YEAR), ("%b %Y", _YEAR_MONTH), ("%B %Y", _YEAR_MONTH), ("%B %d", _MONTH_DAY), ("%b %d", _MONTH_DAY), ("%d %b", _MONTH_DAY), ("%d %B", _MONTH_DAY), ("%B %d, %Y", _YEAR_MONTH_DAY), ("%d %B %Y", _YEAR_MONTH_DAY), ("%m-%d-%Y", _YEAR_MONTH_DAY), ("%Y-%m-%d", _YEAR_MONTH_DAY), ("%Y-%m", _YEAR_MONTH), ("%B %Y", _YEAR_MONTH), ("%d %b %Y", _YEAR_MONTH_DAY), ("%Y-%m-%d", _YEAR_MONTH_DAY), ("%b %d, %Y", _YEAR_MONTH_DAY), ("%d.%m.%Y", _YEAR_MONTH_DAY), ("%A, %b %d", _MONTH_DAY), ("%A, %B %d", _MONTH_DAY), ) # This mapping is used to convert date patterns to regex patterns. _FIELD_TO_REGEX = ( ("%A", r"\w+"), # Weekday as locale’s full name. ("%B", r"\w+"), # Month as locale’s full name. ("%Y", r"\d{4}"), # Year with century as a decimal number. ("%b", r"\w{3}"), # Month as locale’s abbreviated name. ("%d", r"\d{1,2}"), # Day of the month as a zero-padded decimal number. ("%m", r"\d{1,2}"), # Month as a zero-padded decimal number. ) def _process_date_pattern(dp): """Compute a regex for each date pattern to use as a prefilter.""" pattern, mask = dp regex = pattern regex = regex.replace(".", re.escape(".")) regex = regex.replace("-", re.escape("-")) regex = regex.replace(" ", r"\s+") for field, field_regex in _FIELD_TO_REGEX: regex = regex.replace(field, field_regex) # Make sure we didn't miss any of the fields. assert "%" not in regex, regex return pattern, mask, re.compile("^" + regex + "$") def _process_date_patterns(): return tuple(_process_date_pattern(dp) for dp in _DATE_PATTERNS) _PROCESSED_DATE_PATTERNS = _process_date_patterns() _MAX_DATE_NGRAM_SIZE = 5 # Following DynSp: # https://github.com/Microsoft/DynSP/blob/master/util.py#L414. _NUMBER_WORDS = [ "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", ] _ORDINAL_WORDS = [ "zeroth", "first", "second", "third", "fourth", "fith", "sixth", "seventh", "eighth", "ninth", "tenth", "eleventh", "twelfth", ] _ORDINAL_SUFFIXES = ["st", "nd", "rd", "th"] _NUMBER_PATTERN = re.compile(r"((^|\s)[+-])?((\.\d+)|(\d+(,\d\d\d)*(\.\d*)?))") # Following DynSp: # https://github.com/Microsoft/DynSP/blob/master/util.py#L293. _MIN_YEAR = 1700 _MAX_YEAR = 2016 _INF = float("INF") def _get_numeric_value_from_date(date, mask): """Converts date (datetime Python object) to a NumericValue object with a Date object value.""" if date.year < _MIN_YEAR or date.year > _MAX_YEAR: raise ValueError(f"Invalid year: {date.year}") new_date = Date() if mask.year: new_date.year = date.year if mask.month: new_date.month = date.month if mask.day: new_date.day = date.day return NumericValue(date=new_date) def _get_span_length_key(span): """Sorts span by decreasing length first and increasing first index second.""" return span[1] - span[0], -span[0] def _get_numeric_value_from_float(value): """Converts float (Python) to a NumericValue object with a float value.""" return NumericValue(float_value=value) # Doesn't parse ordinal expressions such as '18th of february 1655'. 
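# Illustrative example: _parse_date("August 2007") returns a NumericValue whose Date has year=2007
# and month=8 (the "%B %Y" pattern with the _YEAR_MONTH mask), while text matching no pattern returns None.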
def _parse_date(text): """Attempts to format a text as a standard date string (yyyy-mm-dd).""" text = re.sub(r"Sept\b", "Sep", text) for in_pattern, mask, regex in _PROCESSED_DATE_PATTERNS: if not regex.match(text): continue try: date = datetime.datetime.strptime(text, in_pattern).date() except ValueError: continue try: return _get_numeric_value_from_date(date, mask) except ValueError: continue return None def _parse_number(text): """Parses simple cardinal and ordinals numbers.""" for suffix in _ORDINAL_SUFFIXES: if text.endswith(suffix): text = text[: -len(suffix)] break text = text.replace(",", "") try: value = float(text) except ValueError: return None if math.isnan(value): return None if value == _INF: return None return value def get_all_spans(text, max_ngram_length): """ Split a text into all possible ngrams up to 'max_ngram_length'. Split points are white space and punctuation. Args: text: Text to split. max_ngram_length: maximal ngram length. Yields: Spans, tuples of begin-end index. """ start_indexes = [] for index, char in enumerate(text): if not char.isalnum(): continue if index == 0 or not text[index - 1].isalnum(): start_indexes.append(index) if index + 1 == len(text) or not text[index + 1].isalnum(): for start_index in start_indexes[-max_ngram_length:]: yield start_index, index + 1 def normalize_for_match(text): return " ".join(text.lower().split()) def format_text(text): """Lowercases and strips punctuation.""" text = text.lower().strip() if text == "n/a" or text == "?" or text == "nan": text = EMPTY_TEXT text = re.sub(r"[^\w\d]+", " ", text).replace("_", " ") text = " ".join(text.split()) text = text.strip() if text: return text return EMPTY_TEXT def parse_text(text): """ Extracts longest number and date spans. Args: text: text to annotate Returns: List of longest numeric value spans. 
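
    Example (illustrative): parse_text("he scored 3 goals in 2007") yields one span for "3" (a number)
    and one span for "2007", the latter carrying two values because "2007" parses both as a number and
    as a year (date).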
""" span_dict = collections.defaultdict(list) for match in _NUMBER_PATTERN.finditer(text): span_text = text[match.start() : match.end()] number = _parse_number(span_text) if number is not None: span_dict[match.span()].append(_get_numeric_value_from_float(number)) for begin_index, end_index in get_all_spans(text, max_ngram_length=1): if (begin_index, end_index) in span_dict: continue span_text = text[begin_index:end_index] number = _parse_number(span_text) if number is not None: span_dict[begin_index, end_index].append(_get_numeric_value_from_float(number)) for number, word in enumerate(_NUMBER_WORDS): if span_text == word: span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number))) break for number, word in enumerate(_ORDINAL_WORDS): if span_text == word: span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number))) break for begin_index, end_index in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE): span_text = text[begin_index:end_index] date = _parse_date(span_text) if date is not None: span_dict[begin_index, end_index].append(date) spans = sorted(span_dict.items(), key=lambda span_value: _get_span_length_key(span_value[0]), reverse=True) selected_spans = [] for span, value in spans: for selected_span, _ in selected_spans: if selected_span[0] <= span[0] and span[1] <= selected_span[1]: break else: selected_spans.append((span, value)) selected_spans.sort(key=lambda span_value: span_value[0][0]) numeric_value_spans = [] for span, values in selected_spans: numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values)) return numeric_value_spans # Below: all functions from number_annotation_utils.py and 2 functions (namely filter_invalid_unicode # and filter_invalid_unicode_from_table) from text_utils.py of the original implementation. URL's: # - https://github.com/google-research/tapas/blob/master/tapas/utils/number_annotation_utils.py # - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py _PrimitiveNumericValue = Union[float, Tuple[Optional[float], Optional[float], Optional[float]]] _SortKeyFn = Callable[[NumericValue], Tuple[float, Ellipsis]] _DATE_TUPLE_SIZE = 3 EMPTY_TEXT = "EMPTY" NUMBER_TYPE = "number" DATE_TYPE = "date" def _get_value_type(numeric_value): if numeric_value.float_value is not None: return NUMBER_TYPE elif numeric_value.date is not None: return DATE_TYPE raise ValueError(f"Unknown type: {numeric_value}") def _get_value_as_primitive_value(numeric_value): """Maps a NumericValue proto to a float or tuple of float.""" if numeric_value.float_value is not None: return numeric_value.float_value if numeric_value.date is not None: date = numeric_value.date value_tuple = [None, None, None] # All dates fields are cased to float to produce a simple primitive value. if date.year is not None: value_tuple[0] = float(date.year) if date.month is not None: value_tuple[1] = float(date.month) if date.day is not None: value_tuple[2] = float(date.day) return tuple(value_tuple) raise ValueError(f"Unknown type: {numeric_value}") def _get_all_types(numeric_values): return {_get_value_type(value) for value in numeric_values} def get_numeric_sort_key_fn(numeric_values): """ Creates a function that can be used as a sort key or to compare the values. Maps to primitive types and finds the biggest common subset. Consider the values "05/05/2010" and "August 2007". With the corresponding primitive values (2010.,5.,5.) and (2007.,8., None). 
These values can be compared by year and date so we map to the sequence (2010., 5.), (2007., 8.). If we added a third value "2006" with primitive value (2006., None, None), we could only compare by the year so we would map to (2010.,), (2007.,) and (2006.,). Args: numeric_values: Values to compare Returns: A function that can be used as a sort key function (mapping numeric values to a comparable tuple) Raises: ValueError if values don't have a common type or are not comparable. """ value_types = _get_all_types(numeric_values) if len(value_types) != 1: raise ValueError(f"No common value type in {numeric_values}") value_type = next(iter(value_types)) if value_type == NUMBER_TYPE: # Primitive values are simple floats, nothing to do here. return _get_value_as_primitive_value # The type can only be Date at this point which means the primitive type # is a float triple. valid_indexes = set(range(_DATE_TUPLE_SIZE)) for numeric_value in numeric_values: value = _get_value_as_primitive_value(numeric_value) assert isinstance(value, tuple) for tuple_index, inner_value in enumerate(value): if inner_value is None: valid_indexes.discard(tuple_index) if not valid_indexes: raise ValueError(f"No common value in {numeric_values}") def _sort_key_fn(numeric_value): value = _get_value_as_primitive_value(numeric_value) return tuple(value[index] for index in valid_indexes) return _sort_key_fn def _consolidate_numeric_values(row_index_to_values, min_consolidation_fraction, debug_info): """ Finds the most common numeric values in a column and returns them Args: row_index_to_values: For each row index all the values in that cell. min_consolidation_fraction: Fraction of cells that need to have consolidated value. debug_info: Additional information only used for logging Returns: For each row index the first value that matches the most common value. Rows that don't have a matching value are dropped. Empty list if values can't be consolidated. """ type_counts = collections.Counter() for numeric_values in row_index_to_values.values(): type_counts.update(_get_all_types(numeric_values)) if not type_counts: return {} max_count = max(type_counts.values()) if max_count < len(row_index_to_values) * min_consolidation_fraction: # logging.log_every_n(logging.INFO, f'Can\'t consolidate types: {debug_info} {row_index_to_values} {max_count}', 100) return {} valid_types = set() for value_type, count in type_counts.items(): if count == max_count: valid_types.add(value_type) if len(valid_types) > 1: assert DATE_TYPE in valid_types max_type = DATE_TYPE else: max_type = next(iter(valid_types)) new_row_index_to_value = {} for index, values in row_index_to_values.items(): # Extract the first matching value. for value in values: if _get_value_type(value) == max_type: new_row_index_to_value[index] = value break return new_row_index_to_value def _get_numeric_values(text): """Parses text and returns numeric values.""" numeric_spans = parse_text(text) return itertools.chain(*(span.values for span in numeric_spans)) def _get_column_values(table, col_index): """ Parses text in column and returns a dict mapping row_index to values. 
This is the _get_column_values function from number_annotation_utils.py of the original implementation Args: table: Pandas dataframe col_index: integer, indicating the index of the column to get the numeric values of """ index_to_values = {} for row_index, row in table.iterrows(): text = normalize_for_match(row[col_index].text) index_to_values[row_index] = list(_get_numeric_values(text)) return index_to_values def get_numeric_relation(value, other_value, sort_key_fn): """Compares two values and returns their relation or None.""" value = sort_key_fn(value) other_value = sort_key_fn(other_value) if value == other_value: return Relation.EQ if value < other_value: return Relation.LT if value > other_value: return Relation.GT return None def add_numeric_values_to_question(question): """Adds numeric value spans to a question.""" original_text = question question = normalize_for_match(question) numeric_spans = parse_text(question) return Question(original_text=original_text, text=question, numeric_spans=numeric_spans) def filter_invalid_unicode(text): """Return an empty string and True if 'text' is in invalid unicode.""" return ("", True) if isinstance(text, bytes) else (text, False) def filter_invalid_unicode_from_table(table): """ Removes invalid unicode from table. Checks whether a table cell text contains an invalid unicode encoding. If yes, reset the table cell text to an empty str and log a warning for each invalid cell Args: table: table to clean. """ # to do: add table id support if not hasattr(table, "table_id"): table.table_id = 0 for row_index, row in table.iterrows(): for col_index, cell in enumerate(row): cell, is_invalid = filter_invalid_unicode(cell) if is_invalid: logging.warning( f"Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, " f"col_index: {col_index}", ) for col_index, column in enumerate(table.columns): column, is_invalid = filter_invalid_unicode(column) if is_invalid: logging.warning(f"Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}") def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=None): """ Parses text in table column-wise and adds the consolidated values. Consolidation refers to finding values with a common types (date or number) Args: table: Table to annotate. min_consolidation_fraction: Fraction of cells in a column that need to have consolidated value. debug_info: Additional information used for logging. """ table = table.copy() # First, filter table on invalid unicode filter_invalid_unicode_from_table(table) # Second, replace cell values by Cell objects for row_index, row in table.iterrows(): for col_index, cell in enumerate(row): table.iloc[row_index, col_index] = Cell(text=cell) # Third, add numeric_value attributes to these Cell objects for col_index, column in enumerate(table.columns): column_values = _consolidate_numeric_values( _get_column_values(table, col_index), min_consolidation_fraction=min_consolidation_fraction, debug_info=(debug_info, column), ) for row_index, numeric_value in column_values.items(): table.iloc[row_index, col_index].numeric_value = numeric_value return table
transformers/src/transformers/models/tapas/tokenization_tapas.py/0
{ "file_path": "transformers/src/transformers/models/tapas/tokenization_tapas.py", "repo_id": "transformers", "token_count": 53968 }
358
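A minimal usage sketch for the numeric-annotation helpers above. It assumes they can be imported straight from the TAPAS tokenization module listed in the metadata entry (they are module-level functions there, so this relies on an internal rather than public import path), and the spans it prints depend on the regex patterns and word lists defined earlier in that file.

from transformers.models.tapas.tokenization_tapas import (
    add_numeric_values_to_question,
    parse_text,
)

# parse_text extracts NumericValueSpan objects for numbers, number words,
# ordinals and dates; overlapping spans are resolved in favour of the longer one.
for span in parse_text("the stadium opened in june 1927 and holds 22,000 people"):
    print(span.begin_index, span.end_index, span.values)

# add_numeric_values_to_question normalizes the question text first and wraps
# the result in a Question tuple carrying the detected numeric spans.
question = add_numeric_values_to_question("How many people attended in 1927?")
print(question.numeric_spans)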
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available, ) _import_structure = { "configuration_tvlt": ["TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvltConfig"], "feature_extraction_tvlt": ["TvltFeatureExtractor"], "processing_tvlt": ["TvltProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tvlt"] = [ "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST", "TvltModel", "TvltForPreTraining", "TvltForAudioVisualClassification", "TvltPreTrainedModel", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_tvlt"] = ["TvltImageProcessor"] if TYPE_CHECKING: from .configuration_tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig from .processing_tvlt import TvltProcessor from .feature_extraction_tvlt import TvltFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tvlt import ( TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, TvltPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_tvlt import TvltImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/tvlt/__init__.py/0
{ "file_path": "transformers/src/transformers/models/tvlt/__init__.py", "repo_id": "transformers", "token_count": 1013 }
359
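A hedged sketch of what the lazy __init__ above buys at import time, assuming a transformers version that still ships TVLT: the configuration and processing classes resolve without torch, while the torch-backed modeling classes are replaced by dummy objects that only complain when they are actually used.

from transformers import TvltConfig, TvltProcessor  # both resolve from the base _import_structure

config = TvltConfig()          # pure-Python configuration, no heavy backend needed
print(config.model_type)       # "tvlt"

# The modeling classes are only real when is_torch_available() is True;
# otherwise the lazy module exposes dummy objects that raise an ImportError
# pointing at the missing backend as soon as they are instantiated.
try:
    from transformers import TvltModel

    model = TvltModel(config)
except ImportError as err:
    print("torch backend not available:", err)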
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UniSpeech model configuration""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class UniSpeechConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UniSpeechModel`]. It is used to instantiate an UniSpeech model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeech [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the UniSpeech model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UniSpeechModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`UniSpeechModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_quantizer_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder that's used by the quantizer. 
final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`UniSpeechForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_extract_activation (`str, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. do_stable_layer_norm (`bool`, *optional*, defaults to `False`): Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False` corresponds to applying layer norm after the attention layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. 
Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' num_codevectors_per_group (`int`, *optional*, defaults to 320): Number of entries in each quantization codebook (group). num_codevector_groups (`int`, *optional*, defaults to 2): Number of codevector groups for product codevector quantization. contrastive_logits_temperature (`float`, *optional*, defaults to 0.1): The temperature *kappa* in the contrastive loss. num_negatives (`int`, *optional*, defaults to 100): Number of negative samples for the contrastive loss. codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the quantized feature vectors. proj_codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the final projection of both the quantized and the transformer features. diversity_loss_weight (`int`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`UniSpeechForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`UniSpeechForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`UniSpeechForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. num_ctc_classes (`int`, *optional*, defaults to 80): Specifies the number of classes (phoneme tokens and blank token) for phoneme-level CTC loss. Only relevant when using an instance of [`UniSpeechForPreTraining`]. pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token. 
bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. replace_prob (`float`, *optional*, defaults to 0.5): Propability that transformer feature is replaced by quantized feature for pretraining. Example: ```python >>> from transformers import UniSpeechConfig, UniSpeechModel >>> # Initializing a UniSpeech facebook/unispeech-base-960h style configuration >>> configuration = UniSpeechConfig() >>> # Initializing a model (with random weights) from the facebook/unispeech-base-960h style configuration >>> model = UniSpeechModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "unispeech" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.num_ctc_classes = num_ctc_classes self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # parameters for pretraining with codevector quantized representations self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # pretraining loss self.replace_prob = replace_prob @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
transformers/src/transformers/models/unispeech/configuration_unispeech.py/0
{ "file_path": "transformers/src/transformers/models/unispeech/configuration_unispeech.py", "repo_id": "transformers", "token_count": 6896 }
360
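A small sketch instantiating the configuration above with its defaults and inspecting the derived feature-extractor attributes; the numbers in the comments follow directly from the default conv_dim/conv_stride tuples.

from transformers import UniSpeechConfig

config = UniSpeechConfig()                 # defaults as documented above
print(config.num_feat_extract_layers)      # 7, one per entry in conv_dim
print(config.inputs_to_logits_ratio)       # 5 * 2**6 = 320 input samples per output frame

# conv_dim, conv_stride and conv_kernel must all have the same length; a
# mismatch triggers the ValueError raised in __init__ above.
try:
    UniSpeechConfig(conv_dim=(512, 512), conv_stride=(5, 2, 2))
except ValueError as err:
    print(err)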
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin Transformer + UperNet checkpoints from mmsegmentation. URL: https://github.com/open-mmlab/mmsegmentation/tree/master/configs/swin """ import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def get_upernet_config(model_name): auxiliary_in_channels = 384 window_size = 7 if "tiny" in model_name: embed_dim = 96 depths = (2, 2, 6, 2) num_heads = (3, 6, 12, 24) elif "small" in model_name: embed_dim = 96 depths = (2, 2, 18, 2) num_heads = (3, 6, 12, 24) elif "base" in model_name: embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) window_size = 12 auxiliary_in_channels = 512 elif "large" in model_name: embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) window_size = 12 auxiliary_in_channels = 768 # set label information num_labels = 150 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} backbone_config = SwinConfig( embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], ) config = UperNetConfig( backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, ) return config # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias")) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight")) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias")) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index")) 
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight")) rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias")) if i < 3: rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight")) rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight")) rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias")) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight")) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias")) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, backbone_config): num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): dim = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight") in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[ dim : dim * 2, : ] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[ dim : dim * 2 ] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[ -dim :, : ] state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :] # fmt: on def correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = 
x.reshape(out_channel, 4, in_channel // 4) x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel) return x def reverse_correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = x.reshape(out_channel, in_channel // 4, 4) x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel) return x def correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(4, in_channel // 4) x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) return x # there was an incompatibility with this version, due to a new implementation of their downsampling operation using nn.Unfold. # was resolved as seen here: # https://github.com/open-mmlab/mmdetection/blob/31c84958f54287a8be2b99cbf87a6dcf12e57753/mmdet/models/utils/ckpt_convert.py#L96. def reverse_correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(in_channel // 4, 4) x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel) return x def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): model_name_to_url = { "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth", "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth", "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth", "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth", } checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[ "state_dict" ] for name, param in state_dict.items(): print(name, param.shape) config = get_upernet_config(model_name) model = UperNetForSemanticSegmentation(config) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): val = state_dict.pop(key) if "bn" in key: key = key.replace("bn", "batch_norm") state_dict[key] = val # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config.backbone_config) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: state_dict[key] = reverse_correct_unfold_reduction_order(value) if "norm" in key: state_dict[key] = reverse_correct_unfold_norm_order(value) model.load_state_dict(state_dict) # verify on image url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = SegformerImageProcessor() pixel_values = processor(image, return_tensors="pt").pixel_values with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print(logits.shape) print("First values of logits:", logits[0, 0, :3, :3]) # assert values if model_name == 
"upernet-swin-tiny": expected_slice = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": expected_slice = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": expected_slice = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": expected_slice = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("Logits:", outputs.logits[0, 0, :3, :3]) assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"openmmlab/{model_name}") processor.push_to_hub(f"openmmlab/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py", "repo_id": "transformers", "token_count": 6234 }
361
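A hedged inference sketch for a checkpoint produced by the conversion script above; it assumes the converted weights were pushed to the hub under the openmmlab/upernet-swin-tiny name used in push_to_hub, and it reuses the same ADE20k fixture image and SegformerImageProcessor as the script's verification step.

import requests
import torch
from PIL import Image

from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
model.eval()

url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits        # (batch, 150 ADE20k classes, height, width)

# per-pixel argmax over the class dimension gives the predicted segmentation map
segmentation = logits.argmax(dim=1)[0]
print(segmentation.shape)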
# coding=utf-8 # Copyright 2023 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VipLlava model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "ybelkada/vip-llava-7b-hf": "https://huggingface.co/llava-hf/vip-llava-7b-hf/resolve/main/config.json", } class VipLlavaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VipLlavaForConditionalGeneration`]. It is used to instantiate an VipLlava model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VipLlava-9B. e.g. [ybelkada/vip-llava-7b-hf](https://huggingface.co/ybelkada/vip-llava-7b-hf) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`VipLlavaVisionConfig`, *optional*): Custom vision config or dict text_config (`Union[AutoConfig, dict]`, *optional*): The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`. ignore_index (`int`, *optional*, defaults to -100): The ignore index for the loss function. image_token_index (`int`, *optional*, defaults to 32000): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. projector_layernorm_eps (`float`, *optional*, defaults to 1e-05): The layer norm epsilon of the projector layernorm vision_feature_layers (`List[int]`, *optional*, defaults to `[-2, -5, -8, -11, 6]`): The list of layers to select the vision features from. vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the VipLlava model. 
Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`~VipLlavaForConditionalGeneration`] Example: ```python >>> from transformers import VipLlavaForConditionalGeneration, VipLlavaConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a VipLlava vipllava-7b style configuration >>> configuration = VipLlavaConfig(vision_config, text_config) >>> # Initializing a model from the vipllava-7b style configuration >>> model = VipLlavaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vipllava" is_composition = False def __init__( self, vision_config=None, text_config=None, ignore_index=-100, image_token_index=32000, projector_hidden_act="gelu", projector_layernorm_eps=1e-5, vision_feature_layers=[-2, -5, -8, -11, 6], vocab_size=32000, **kwargs, ): self.ignore_index = ignore_index self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.projector_layernorm_eps = projector_layernorm_eps self.vision_feature_layers = vision_feature_layers self.vocab_size = vocab_size self.vision_config = vision_config if isinstance(self.vision_config, dict): vision_config["model_type"] = ( vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model" ) self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: self.vision_config = CONFIG_MAPPING["clip_vision_model"]( intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=336, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, projection_dim=768, ) self.vocab_size = self.vocab_size self.text_config = text_config if isinstance(self.text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama" self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) self.vocab_size = self.text_config.vocab_size elif text_config is None: self.text_config = CONFIG_MAPPING["llama"]() super().__init__(**kwargs)
transformers/src/transformers/models/vipllava/configuration_vipllava.py/0
{ "file_path": "transformers/src/transformers/models/vipllava/configuration_vipllava.py", "repo_id": "transformers", "token_count": 2161 }
362
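A short sketch composing the configuration above from explicit sub-configs, mirroring its docstring example; passing dicts (rather than config objects) exercises the CONFIG_MAPPING re-hydration path in __init__, and the local folder name is only a placeholder.

from transformers import CLIPVisionConfig, LlamaConfig, VipLlavaConfig

vision_config = CLIPVisionConfig()      # CLIP-ViT defaults
text_config = LlamaConfig()             # Llama defaults

# Dicts carry a "model_type" key, which __init__ uses to pick the matching
# config class out of CONFIG_MAPPING.
config = VipLlavaConfig(vision_config=vision_config.to_dict(), text_config=text_config.to_dict())
print(config.vision_feature_layers)     # [-2, -5, -8, -11, 6] by default
print(config.text_config.model_type)    # "llama"

# Round-tripping through save_pretrained keeps both nested configs.
config.save_pretrained("vipllava-config")                     # placeholder local folder
reloaded = VipLlavaConfig.from_pretrained("vipllava-config")
print(reloaded.vision_config.model_type)                      # "clip_vision_model"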
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VisualBert checkpoint.""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) rename_keys_prefix = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] ACCEPTABLE_CHECKPOINTS = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def load_state_dict(checkpoint_path): sd = torch.load(checkpoint_path, map_location="cpu") return sd def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix): new_d = OrderedDict() new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1)) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue new_key = key for name_pair in rename_keys_prefix: new_key = new_key.replace(name_pair[0], name_pair[1]) new_d[new_key] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our VisualBERT structure. """ assert ( checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." 
# Get Config if "pre" in checkpoint_path: model_type = "pretraining" if "vcr" in checkpoint_path: config_params = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: config_params = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: config_params = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: config_params = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.") else: if "vcr" in checkpoint_path: config_params = {"visual_embedding_dim": 512} model_type = "multichoice" elif "vqa_advanced" in checkpoint_path: config_params = {"visual_embedding_dim": 2048} model_type = "vqa_advanced" elif "vqa" in checkpoint_path: config_params = {"visual_embedding_dim": 2048, "num_labels": 3129} model_type = "vqa" elif "nlvr" in checkpoint_path: config_params = { "visual_embedding_dim": 1024, "num_labels": 2, } model_type = "nlvr" config = VisualBertConfig(**config_params) # Load State Dict state_dict = load_state_dict(checkpoint_path) new_state_dict = get_new_dict(state_dict, config) if model_type == "pretraining": model = VisualBertForPreTraining(config) elif model_type == "vqa": model = VisualBertForQuestionAnswering(config) elif model_type == "nlvr": model = VisualBertForVisualReasoning(config) elif model_type == "multichoice": model = VisualBertForMultipleChoice(config) model.load_state_dict(new_state_dict) # Save Checkpoints Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") args = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
transformers/src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2115 }
363
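A hedged sketch of how the conversion script above is driven and what it produces; the checkpoint and output paths are placeholders, and the checkpoint file name has to be one of ACCEPTABLE_CHECKPOINTS so that the model-type and visual_embedding_dim branches resolve.

# Command line (both arguments are positional, as declared in the argparse block above):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       /path/to/vqa_pre_trained.th /path/to/visual_bert_vqa_pretrained
#
# The dump folder can then be loaded back as a regular transformers model.
from transformers import VisualBertForPreTraining

model = VisualBertForPreTraining.from_pretrained("/path/to/visual_bert_vqa_pretrained")  # placeholder path
print(model.config.visual_embedding_dim)   # 2048 for the "vqa" + "pre" branch above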
# coding=utf-8 # Copyright 2023 HUST-VL and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ViTMatte model.""" from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...utils.backbone_utils import load_backbone from .configuration_vitmatte import VitMatteConfig VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = [ "hustvl/vitmatte-small-composition-1k", # See all VitMatte models at https://huggingface.co/models?filter=vitmatte ] # General docstring _CONFIG_FOR_DOC = "VitMatteConfig" @dataclass class ImageMattingOutput(ModelOutput): """ Class for outputs of image matting models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Loss. alphas (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Estimated alpha values. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None alphas: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class VitMattePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = VitMatteConfig main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Conv2d): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() class VitMatteBasicConv3x3(nn.Module): """ Basic convolution layers including: Conv3x3, BatchNorm2d, ReLU layers. 
""" def __init__(self, config, in_channels, out_channels, stride=2, padding=1): super().__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, bias=False, ) self.batch_norm = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps) self.relu = nn.ReLU() def forward(self, hidden_state): hidden_state = self.conv(hidden_state) hidden_state = self.batch_norm(hidden_state) hidden_state = self.relu(hidden_state) return hidden_state class VitMatteConvStream(nn.Module): """ Simple ConvStream containing a series of basic conv3x3 layers to extract detail features. """ def __init__(self, config): super().__init__() in_channels = config.backbone_config.num_channels out_channels = config.convstream_hidden_sizes self.convs = nn.ModuleList() self.conv_chans = [in_channels] + out_channels for i in range(len(self.conv_chans) - 1): in_chan_ = self.conv_chans[i] out_chan_ = self.conv_chans[i + 1] self.convs.append(VitMatteBasicConv3x3(config, in_chan_, out_chan_)) def forward(self, pixel_values): out_dict = {"detailed_feature_map_0": pixel_values} embeddings = pixel_values for i in range(len(self.convs)): embeddings = self.convs[i](embeddings) name_ = "detailed_feature_map_" + str(i + 1) out_dict[name_] = embeddings return out_dict class VitMatteFusionBlock(nn.Module): """ Simple fusion block to fuse features from ConvStream and Plain Vision Transformer. """ def __init__(self, config, in_channels, out_channels): super().__init__() self.conv = VitMatteBasicConv3x3(config, in_channels, out_channels, stride=1, padding=1) def forward(self, features, detailed_feature_map): upscaled_features = nn.functional.interpolate(features, scale_factor=2, mode="bilinear", align_corners=False) out = torch.cat([detailed_feature_map, upscaled_features], dim=1) out = self.conv(out) return out class VitMatteHead(nn.Module): """ Simple Matting Head, containing only conv3x3 and conv1x1 layers. """ def __init__(self, config): super().__init__() in_channels = config.fusion_hidden_sizes[-1] mid_channels = 16 self.matting_convs = nn.Sequential( nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(mid_channels), nn.ReLU(True), nn.Conv2d(mid_channels, 1, kernel_size=1, stride=1, padding=0), ) def forward(self, hidden_state): hidden_state = self.matting_convs(hidden_state) return hidden_state class VitMatteDetailCaptureModule(nn.Module): """ Simple and lightweight Detail Capture Module for ViT Matting. """ def __init__(self, config): super().__init__() if len(config.fusion_hidden_sizes) != len(config.convstream_hidden_sizes) + 1: raise ValueError( "The length of fusion_hidden_sizes should be equal to the length of convstream_hidden_sizes + 1." 
) self.config = config self.convstream = VitMatteConvStream(config) self.conv_chans = self.convstream.conv_chans self.fusion_blocks = nn.ModuleList() self.fusion_channels = [config.hidden_size] + config.fusion_hidden_sizes for i in range(len(self.fusion_channels) - 1): self.fusion_blocks.append( VitMatteFusionBlock( config=config, in_channels=self.fusion_channels[i] + self.conv_chans[-(i + 1)], out_channels=self.fusion_channels[i + 1], ) ) self.matting_head = VitMatteHead(config) def forward(self, features, pixel_values): detail_features = self.convstream(pixel_values) for i in range(len(self.fusion_blocks)): detailed_feature_map_name = "detailed_feature_map_" + str(len(self.fusion_blocks) - i - 1) features = self.fusion_blocks[i](features, detail_features[detailed_feature_map_name]) alphas = torch.sigmoid(self.matting_head(features)) return alphas VITMATTE_START_DOCSTRING = r""" Parameters: This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VITMATTE_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`VitMatteImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """ViTMatte framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", VITMATTE_START_DOCSTRING, ) class VitMatteForImageMatting(VitMattePreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.backbone = load_backbone(config) self.decoder = VitMatteDetailCaptureModule(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VITMATTE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=ImageMattingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ): """ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth image matting for computing the loss. 
Returns: Examples: ```python >>> from transformers import VitMatteImageProcessor, VitMatteForImageMatting >>> import torch >>> from PIL import Image >>> from huggingface_hub import hf_hub_download >>> processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k") >>> model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset" ... ) >>> image = Image.open(filepath).convert("RGB") >>> filepath = hf_hub_download( ... repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset" ... ) >>> trimap = Image.open(filepath).convert("L") >>> # prepare image + trimap for the model >>> inputs = processor(images=image, trimaps=trimap, return_tensors="pt") >>> with torch.no_grad(): ... alphas = model(**inputs).alphas >>> print(alphas.shape) torch.Size([1, 1, 640, 960]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) features = outputs.feature_maps[-1] alphas = self.decoder(features, pixel_values) loss = None if labels is not None: raise NotImplementedError("Training is not yet supported") if not return_dict: output = (alphas,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageMattingOutput( loss=loss, alphas=alphas, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/vitmatte/modeling_vitmatte.py/0
{ "file_path": "transformers/src/transformers/models/vitmatte/modeling_vitmatte.py", "repo_id": "transformers", "token_count": 5253 }
364
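A hedged follow-up to the docstring example in the forward pass above: it assumes image (the PIL RGB input) and alphas (shape (1, 1, padded_height, padded_width)) from that example are still in scope, crops away the right/bottom padding the image processor is assumed to add, and composites the predicted foreground over a white background purely for illustration.

import numpy as np
from PIL import Image

# drop the batch/channel axes and the padding assumed to sit on the right/bottom
alpha = alphas[0, 0, : image.height, : image.width].cpu().numpy()      # (H, W) in [0, 1]

# standard alpha compositing: foreground * alpha + background * (1 - alpha)
rgb = np.asarray(image, dtype=np.float32) / 255.0
composite = rgb * alpha[..., None] + 1.0 * (1.0 - alpha[..., None])    # white background

Image.fromarray((alpha * 255).astype("uint8")).save("alpha_matte.png")
Image.fromarray((composite * 255).astype("uint8")).save("composite_white.png")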
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax Wav2Vec2 model.""" from functools import partial from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wav2vec2 import Wav2Vec2Config logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxWav2Vec2BaseModelOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2BaseModelOutput`], with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`jnp.ndarray` of shape `(batch_size, sequence_length, last_conv_dim)`): Sequence of extracted feature vectors of the last convolutional layer of the model with `last_conv_dim` being the dimension of the last convolutional layer. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None extract_features: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxWav2Vec2ForPreTrainingOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2ForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `jnp.ndarray` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. 
projected_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_states: jnp.ndarray = None projected_quantized_states: jnp.ndarray = None codevector_perplexity: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[np.ndarray] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and" f" `sequence_length`: {sequence_length}`" ) # compute number of masked spans in batch num_masked_spans = int(mask_prob * sequence_length / mask_length + np.random.rand(1).item()) num_masked_spans = max(num_masked_spans, min_masks) # make sure num masked indices <= sequence_length if num_masked_spans * mask_length > sequence_length: num_masked_spans = sequence_length // mask_length # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) # get random indices to mask spec_aug_mask_idxs = np.array( [ np.random.choice(np.arange(sequence_length - (mask_length - 1)), num_masked_spans, replace=False) for _ in range(batch_size) ] ) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, num_masked_spans, mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, num_masked_spans * mask_length) offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, num_masked_spans, mask_length)).reshape( batch_size, num_masked_spans * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) if attention_mask is not None: # make sure padded input ids cannot be masked spec_aug_mask = np.where(attention_mask, spec_aug_mask, False) return spec_aug_mask def _sample_negative_indices(features_shape: Tuple, num_negatives: int, attention_mask: Optional[np.ndarray] = None): """ Sample `num_negatives` vectors from feature vectors. """ batch_size, sequence_length, hidden_size = features_shape if sequence_length <= 1: raise ValueError( "`features should have `sequence_length` > 1, but are of shape " f"(batch_size, sequence_length, hidden_size) = ({batch_size, sequence_length, hidden_size})." ) # get `num_negatives` random vector indices from the same utterance sampled_negative_indices = [] for batch_idx in range(batch_size): high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1 sampled_indices_slice = np.random.randint(0, high, size=(num_negatives * sequence_length,)) sampled_negative_indices.append(sampled_indices_slice) sampled_negative_indices = np.asarray(sampled_negative_indices, dtype=np.int32) # generate indices of the positive vectors themselves, repeat them `num_negatives` times feature_indices = np.broadcast_to(np.arange(sequence_length)[:, None], (sequence_length, num_negatives)).flatten() # avoid sampling the same positive vector, but keep the distribution uniform sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1 # correct for batch size for batch_idx in range(1, batch_size): sampled_negative_indices[batch_idx] += batch_idx * sequence_length return sampled_negative_indices WAV_2_VEC_2_START_DOCSTRING = r""" Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ WAV_2_VEC_2_INPUTS_DOCSTRING = r""" Args: input_values (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `jnp.ndarray`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) .. warning:: `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. mask_time_indices (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxWav2Vec2LayerNormConvLayer(nn.Module): config: Wav2Vec2Config layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): self.in_conv_dim = self.config.conv_dim[self.layer_id] if self.layer_id > 0 else 1 self.out_conv_dim = self.config.conv_dim[self.layer_id] self.conv = nn.Conv( features=self.config.conv_dim[self.layer_id], kernel_size=(self.config.conv_kernel[self.layer_id],), strides=(self.config.conv_stride[self.layer_id],), use_bias=self.config.conv_bias, kernel_init=jax.nn.initializers.he_normal(), padding="VALID", dtype=self.dtype, ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxConvWithWeightNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=self.config.hidden_size, kernel_size=(self.config.num_conv_pos_embeddings,), kernel_init=jax.nn.initializers.he_normal(), padding="VALID", feature_group_count=self.config.num_conv_pos_embedding_groups, dtype=self.dtype, ) weight_shape = ( self.conv.features, self.conv.features // self.conv.feature_group_count, self.conv.kernel_size[0], ) self.weight_v = self.param("weight_v", jax.nn.initializers.he_normal(), weight_shape) self.weight_g = self.param("weight_g", lambda _: jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :]) self.bias = self.param("bias", jax.nn.initializers.zeros, (self.conv.features,)) self.prev_padding = self.conv.kernel_size[0] // 2 def _get_normed_weights(self): weight_v_norm = jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :] normed_weight_v = jnp.divide(self.weight_v, weight_v_norm) normed_kernel = jnp.multiply(normed_weight_v, self.weight_g) return normed_kernel def __call__(self, hidden_states): kernel = self._get_normed_weights() hidden_states = jnp.pad(hidden_states, ((0, 0), (self.prev_padding, self.prev_padding), (0, 0))) hidden_states = self.conv.apply({"params": {"kernel": kernel.T, "bias": self.bias}}, hidden_states) return hidden_states class FlaxWav2Vec2PositionalConvEmbedding(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = FlaxConvWithWeightNorm(self.config, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] self.num_pad_remove = 1 if self.config.num_conv_pos_embeddings % 2 == 0 else 0 def __call__(self, hidden_states): hidden_states = hidden_states.transpose((0, 1, 2)) hidden_states = self.conv(hidden_states) if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose((0, 1, 2)) return hidden_states class FlaxConvLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.feat_extract_norm == "layer": self.layers = [ 
FlaxWav2Vec2LayerNormConvLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_feat_extract_layers) ] elif self.config.feat_extract_norm == "group": raise NotImplementedError("At the moment only ``config.feat_extact_norm == 'layer'`` is supported") else: raise ValueError( f"`config.feat_extract_norm` is {self.config.feat_extract_norm}, but has to be one of ['group'," " 'layer']" ) def __call__(self, hidden_states): for i, conv_layer in enumerate(self.layers): hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2FeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv_layers = FlaxConvLayersCollection(self.config, dtype=self.dtype) def __call__(self, input_values, freeze_feature_encoder=False): hidden_states = input_values[:, :, None] hidden_states = self.conv_layers(hidden_states) if freeze_feature_encoder: hidden_states = jax.lax.stop_gradient(hidden_states) return hidden_states class FlaxWav2Vec2FeatureProjection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.projection = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.feat_proj_dropout) def __call__(self, hidden_states, deterministic=True): norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states, norm_hidden_states class FlaxWav2Vec2Attention(nn.Module): config: Wav2Vec2Config embed_dim: int num_heads: int dropout: float = 0.0 bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # get query proj query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # Convert the boolean attention mask to an attention bias. 
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class FlaxWav2Vec2FeedForward(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.intermediate_dropout = nn.Dropout(rate=self.config.activation_dropout) self.intermediate_dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) if isinstance(self.config.hidden_act, str): self.intermediate_act_fn = ACT2FN[self.config.hidden_act] else: self.intermediate_act_fn = self.config.hidden_act self.output_dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.output_dropout = nn.Dropout(rate=self.config.hidden_dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, deterministic=deterministic) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxWav2Vec2EncoderLayerStableLayerNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxWav2Vec2Attention( config=self.config, embed_dim=self.config.hidden_size, num_heads=self.config.num_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.feed_forward = FlaxWav2Vec2FeedForward(self.config, dtype=self.dtype) self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights = self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic ) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward( self.final_layer_norm(hidden_states), deterministic=deterministic ) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxWav2Vec2EncoderLayerStableLayerNormCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2EncoderLayerStableLayerNorm(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, 
deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxWav2Vec2StableLayerNormEncoder(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.pos_conv_embed = FlaxWav2Vec2PositionalConvEmbedding(self.config, dtype=self.dtype) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layers = FlaxWav2Vec2EncoderLayerStableLayerNormCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False, output_hidden_states=False, return_dict=True, ): if attention_mask is not None: # make sure padded tokens are not attended to hidden_states = jnp.where( jnp.broadcast_to(attention_mask[:, :, None], hidden_states.shape), hidden_states, 0 ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.layer_norm(outputs[0]) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_state,) if not return_dict: outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=outputs.attentions ) class FlaxWav2Vec2GumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. 
""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.num_groups = self.config.num_codevector_groups self.num_vars = self.config.num_codevectors_per_group if self.config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {self.config.codevector_dim} must be divisible by" f" `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = self.param( "codevectors", jax.nn.initializers.uniform(), (1, self.num_groups * self.num_vars, self.config.codevector_dim // self.num_groups), ) self.weight_proj = nn.Dense( self.num_groups * self.num_vars, kernel_init=jax.nn.initializers.normal(1.0), dtype=self.dtype, ) @staticmethod def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = jnp.broadcast_to(mask.flatten()[:, None, None], probs.shape) probs = jnp.where(mask_extended, probs, jnp.zeros_like(probs)) marginal_probs = probs.sum(axis=0) / mask.sum() else: marginal_probs = probs.mean(axis=0) perplexity = jnp.exp(-jnp.sum(marginal_probs * jnp.log(marginal_probs + 1e-7), axis=-1)).sum() return perplexity def __call__(self, hidden_states, mask_time_indices=None, deterministic=True, temperature=1): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.reshape(batch_size * sequence_length * self.num_groups, -1) if not deterministic: # sample code vector probs via gumbel in differentiateable way gumbel_rng = self.make_rng("gumbel") gumbels = jax.random.gumbel(gumbel_rng, hidden_states.shape) codevector_probs = nn.softmax((hidden_states + gumbels) / temperature) # compute perplexity codevector_soft_dist = nn.softmax( hidden_states.reshape(batch_size * sequence_length, self.num_groups, -1), axis=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(axis=-1) codevector_probs = jax.nn.one_hot(codevector_idx, hidden_states.shape[-1]) * 1.0 codevector_probs = codevector_probs.reshape(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs, mask_time_indices) codevector_probs = codevector_probs.reshape(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = jnp.expand_dims(codevector_probs, axis=-1) * self.codevectors codevectors = codevectors_per_group.reshape(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).reshape(batch_size, sequence_length, -1) return codevectors, perplexity class FlaxWav2Vec2Adapter(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): # hidden_states require down-projection if feature dims don't match if self.config.output_hidden_size != self.config.hidden_size: self.proj = nn.Dense( self.config.output_hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.proj_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) else: self.proj = self.proj_layer_norm = None self.layers = FlaxWav2Vec2AdapterLayersCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): # down-project hidden_states if required if self.proj is not None and self.proj_layer_norm is not None: hidden_states = 
self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = self.layers(hidden_states) return hidden_states class FlaxWav2Vec2AdapterLayer(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=2 * self.config.output_hidden_size, kernel_size=(self.config.adapter_kernel_size,), strides=(self.config.adapter_stride,), padding=((1, 1),), kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.glu(hidden_states, axis=2) return hidden_states class FlaxWav2Vec2AdapterLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2AdapterLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_adapter_layers) ] def __call__(self, hidden_states): for conv_layer in self.layers: hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2Config base_model_prefix: str = "wav2vec2" main_input_name = "input_values" module_class: nn.Module = None def __init__( self, config: Wav2Vec2Config, input_shape: Tuple = (1, 1024), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_values = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_values) params_rng, dropout_rng = jax.random.split(rng, 2) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_values, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, not train, output_attentions, 
output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) class FlaxWav2Vec2Module(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.feature_extractor = FlaxWav2Vec2FeatureEncoder(self.config, dtype=self.dtype) self.feature_projection = FlaxWav2Vec2FeatureProjection(self.config, dtype=self.dtype) self.masked_spec_embed = self.param( "masked_spec_embed", jax.nn.initializers.uniform(), (self.config.hidden_size,) ) if self.config.do_stable_layer_norm: self.encoder = FlaxWav2Vec2StableLayerNormEncoder(self.config, dtype=self.dtype) else: raise NotImplementedError("``config.do_stable_layer_norm is False`` is currently not supported.") self.adapter = FlaxWav2Vec2Adapter(self.config, dtype=self.dtype) if self.config.add_adapter else None def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): extract_features = self.feature_extractor(input_values, freeze_feature_encoder=freeze_feature_encoder) # make sure that no loss is computed on padded inputs if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features, deterministic=deterministic) if mask_time_indices is not None: # apply SpecAugment along time axis with given indices hidden_states = jnp.where( jnp.broadcast_to(mask_time_indices[:, :, None], hidden_states.shape), jnp.broadcast_to(self.masked_spec_embed[None, None, :], hidden_states.shape), hidden_states, ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return FlaxWav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: jnp.ndarray, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be 
able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(axis=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) batch_size = attention_mask.shape[0] attention_mask = jnp.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask = attention_mask.at[jnp.arange(attention_mask.shape[0]), output_lengths - 1].set(1) attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") return attention_mask @add_start_docstrings( "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2Module FLAX_WAV2VEC2_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoProcessor, FlaxWav2Vec2Model >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ``` """ overwrite_call_docstring( FlaxWav2Vec2Model, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_MODEL_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2Model, output_type=FlaxWav2Vec2BaseModelOutput, config_class=Wav2Vec2Config ) class FlaxWav2Vec2ForCTCModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.final_dropout) self.lm_head = nn.Dense( self.config.vocab_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): outputs = self.wav2vec2( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.lm_head(hidden_states) if not return_dict: return (logits,) + outputs[2:] return FlaxCausalLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None, ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for 
kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings( "Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForCTCModule FLAX_WAV2VEC2_FOR_CTC_DOCSTRING = """ Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoProcessor, FlaxWav2Vec2ForCTC >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = jnp.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST" ``` """ overwrite_call_docstring( FlaxWav2Vec2ForCTC, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_CTC_DOCSTRING, ) append_replace_return_docstrings(FlaxWav2Vec2ForCTC, output_type=FlaxCausalLMOutput, config_class=Wav2Vec2Config) class FlaxWav2Vec2ForPreTrainingModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout_features = nn.Dropout(self.config.feat_quantizer_dropout) self.quantizer = FlaxWav2Vec2GumbelVectorQuantizer(self.config, dtype=self.dtype) self.project_q = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.project_hid = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, deterministic: bool = True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): r""" Returns: Example: ```python ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) # project all transformed features (including masked) to final vq dim transformer_features = self.project_hid(outputs[0]) # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1], deterministic=deterministic) quantized_features, codevector_perplexity = self.quantizer( extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature ) quantized_features = self.project_q(quantized_features) if 
not return_dict: return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return FlaxWav2Vec2ForPreTrainingOutput( projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", WAV_2_VEC_2_START_DOCSTRING) class FlaxWav2Vec2ForPreTraining(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForPreTrainingModule @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) # overwrite since has `gumbel_temperature` input def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, params: dict = None, dropout_rng: jax.random.PRNGKey = None, gumbel_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng if gumbel_rng is not None: rngs["gumbel"] = gumbel_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, gumbel_temperature, not train, output_attentions, output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING = """ Returns: Example: ```python >>> import optax >>> import numpy as np >>> import jax.numpy as jnp >>> from transformers import AutoFeatureExtractor, FlaxWav2Vec2ForPreTraining >>> from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices >>> from datasets import load_dataset >>> import soundfile as sf >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... 
return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) >>> outputs = model(input_values, mask_time_indices=mask_time_indices) >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) >>> cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) >>> # show that cosine similarity is much higher than random >>> assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5 ``` """ overwrite_call_docstring( FlaxWav2Vec2ForPreTraining, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config )
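The Flax Wav2Vec2 model above repeats the 1D-convolution length formula `(input_length - kernel_size) // stride + 1` inside every `_get_feat_extract_output_lengths` helper. A minimal, self-contained sketch of that computation follows; the kernel and stride tuples are the usual wav2vec2 base/large defaults and are assumptions for illustration, not values read from a real `Wav2Vec2Config`.

```python
# Standalone sketch of the feature-extractor length computation used by
# _get_feat_extract_output_lengths in the file above. The conv settings below are
# assumed defaults, not loaded from a checkpoint.
CONV_KERNEL = (10, 3, 3, 3, 3, 2, 2)
CONV_STRIDE = (5, 2, 2, 2, 2, 2, 2)


def _conv_out_length(input_length: int, kernel_size: int, stride: int) -> int:
    # 1D convolutional layer output length formula, as in the torch.nn.Conv1d docs.
    return (input_length - kernel_size) // stride + 1


def feat_extract_output_length(input_length: int) -> int:
    for kernel_size, stride in zip(CONV_KERNEL, CONV_STRIDE):
        input_length = _conv_out_length(input_length, kernel_size, stride)
    return input_length


print(feat_extract_output_length(16_000))  # one second of 16 kHz audio -> 49 feature frames
```

This is the same per-layer reduction that `_get_feature_vector_attention_mask` relies on when deciding how many feature frames of each padded example remain attended to.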
transformers/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py", "repo_id": "transformers", "token_count": 24375 }
365
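The Flax Wav2Vec2 file above also defines `_compute_mask_indices`, the NumPy helper that draws SpecAugment-style span masks on CPU before training; the file's own pretraining docstring imports it the same way. A hedged usage sketch, assuming `transformers` is installed with its Flax extras so the import resolves:

```python
from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices

batch_size, sequence_length = 4, 200
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.15, mask_length=10)

# Boolean SpecAugment mask: each True run covers `mask_length` consecutive timesteps.
print(mask.shape)         # (4, 200)
print(mask.sum(axis=-1))  # roughly mask_prob * sequence_length per row, fewer when spans overlap
```

Passed as `mask_time_indices`, such a mask selects the timesteps whose quantized targets the pretraining head tries to recover.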
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Whisper """ from ...processing_utils import ProcessorMixin class WhisperProcessor(ProcessorMixin): r""" Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single processor. [`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information. Args: feature_extractor (`WhisperFeatureExtractor`): An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input. tokenizer (`WhisperTokenizer`): An instance of [`WhisperTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "WhisperFeatureExtractor" tokenizer_class = "WhisperTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, *args, **kwargs): """ Forwards the `audio` argument to WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] and the `text` argument to [`~WhisperTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def get_prompt_ids(self, text: str, return_tensors="np"): return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
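A hedged usage sketch for the processor defined above. The `openai/whisper-tiny` checkpoint name and the all-zeros waveform are assumptions for illustration only; any Whisper checkpoint that ships a feature extractor and tokenizer should behave the same way.

```python
import numpy as np

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")  # assumed checkpoint

speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, text="hello world", return_tensors="np")

# The feature extractor contributes the audio features and the tokenizer contributes the
# ids stored under "labels", mirroring the branching in WhisperProcessor.__call__ above.
print(sorted(inputs.keys()))  # typically ['input_features', 'labels']
```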
transformers/src/transformers/models/whisper/processing_whisper.py/0
{ "file_path": "transformers/src/transformers/models/whisper/processing_whisper.py", "repo_id": "transformers", "token_count": 1465 }
366
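The prompt helpers on the same processor are thin forwarding wrappers around `WhisperTokenizer`, as the methods above show. Another small sketch under the same assumed checkpoint; the exact token ids it prints depend on that checkpoint's vocabulary:

```python
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")  # assumed checkpoint

# Both calls are forwarded to the underlying WhisperTokenizer, per the wrappers above.
forced_ids = processor.get_decoder_prompt_ids(task="transcribe", language="english")
prompt_ids = processor.get_prompt_ids("glossary: mitochondria, ribosome", return_tensors="np")

print(forced_ids)      # list of (position, token_id) pairs, usable as generate(..., forced_decoder_ids=forced_ids)
print(prompt_ids[:5])  # NumPy array of prompt token ids
```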
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for XLM-RoBERTa model.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlm_roberta import XLMRobertaTokenizer else: XLMRobertaTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/tokenizer.json", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/tokenizer.json", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/tokenizer.json" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/tokenizer.json" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/tokenizer.json" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class XLMRobertaTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" XLM-RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). 
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = XLMRobertaTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. 
token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
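A short illustrative sketch of the special-token layout that `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` implement above. It assumes the `xlm-roberta-base` checkpoint listed in `PRETRAINED_VOCAB_FILES_MAP` can be downloaded:

```python
from transformers import XLMRobertaTokenizerFast

tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

ids_a = tokenizer.encode("Hello world", add_special_tokens=False)
ids_b = tokenizer.encode("How are you?", add_special_tokens=False)

pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
token_types = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)

# Pair layout is <s> A </s></s> B </s>; XLM-R never distinguishes segments with token types.
assert pair[0] == tokenizer.cls_token_id and pair[-1] == tokenizer.sep_token_id
assert pair.count(tokenizer.sep_token_id) == 3
assert set(token_types) == {0} and len(token_types) == len(pair)
```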
transformers/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py/0
{ "file_path": "transformers/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py", "repo_id": "transformers", "token_count": 4437 }
367
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math import warnings from functools import partial from typing import Callable, Iterable, Optional, Tuple, Union import torch from torch import nn from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau from .trainer_utils import SchedulerType from .utils import logging from .utils.versions import require_version logger = logging.get_logger(__name__) def _get_constant_lambda(_=None): return 1 def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): """ Create a schedule with a constant learning rate, using the learning rate set in optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch) def get_reduce_on_plateau_schedule(optimizer: Optimizer, **kwargs): """ Create a schedule with a constant learning rate that decreases when a metric has stopped improving. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. kwargs (`dict`, *optional*): Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau` for possible parameters. Return: `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule. """ return ReduceLROnPlateau(optimizer, **kwargs) def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
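Example (a minimal sketch with a toy model, `torch.optim.SGD` and an arbitrary initial lr of 0.1 standing in for a real setup):

```python
import torch

from transformers import get_constant_schedule_with_warmup

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=4)

lrs = []
for _ in range(8):
    optimizer.step()
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
# lrs ramps linearly from 0 to 0.1 over the 4 warmup steps, then stays constant at 0.1
```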
""" lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps) return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_lambda = partial( _get_linear_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_cosine_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_cycles (`float`, *optional*, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" lr_lambda = partial( _get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_cycles (`int`, *optional*, defaults to 1): The number of hard restarts to use. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_lambda = partial( _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_polynomial_decay_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, lr_end: float, power: float, lr_init: int, ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init def get_polynomial_decay_schedule_with_warmup( optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 ): """ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. lr_end (`float`, *optional*, defaults to 1e-7): The end LR. power (`float`, *optional*, defaults to 1.0): Power factor. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. 
Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_init = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})") lr_lambda = partial( _get_polynomial_decay_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, lr_end=lr_end, power=power, lr_init=lr_init, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: int = None): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) shift = timescale - num_warmup_steps decay = 1.0 / math.sqrt((current_step + shift) / timescale) return decay def get_inverse_sqrt_schedule( optimizer: Optimizer, num_warmup_steps: int, timescale: int = None, last_epoch: int = -1 ): """ Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a warmup period which increases lr linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. timescale (`int`, *optional*, defaults to `num_warmup_steps`): Time scale. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ # Note: this implementation is adapted from # https://github.com/google-research/big_vision/blob/f071ce68852d56099437004fd70057597a95f6ef/big_vision/utils.py#L930 if timescale is None: timescale = num_warmup_steps lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale) return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) TYPE_TO_SCHEDULER_FUNCTION = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule, SchedulerType.REDUCE_ON_PLATEAU: get_reduce_on_plateau_schedule, } def get_scheduler( name: Union[str, SchedulerType], optimizer: Optimizer, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, scheduler_specific_kwargs: Optional[dict] = None, ): """ Unified API to get any scheduler from its name. Args: name (`str` or `SchedulerType`): The name of the scheduler to use. optimizer (`torch.optim.Optimizer`): The optimizer that will be used during training. num_warmup_steps (`int`, *optional*): The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. num_training_steps (`int``, *optional*): The number of training steps to do. 
This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. scheduler_specific_kwargs (`dict`, *optional*): Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler parameters will cause the scheduler function to raise a TypeError. """ name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(optimizer) if scheduler_specific_kwargs is None: scheduler_specific_kwargs = {} if name == SchedulerType.REDUCE_ON_PLATEAU: return schedule_func(optimizer, **scheduler_specific_kwargs) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) if name == SchedulerType.INVERSE_SQRT: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") return schedule_func( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **scheduler_specific_kwargs, ) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay Regularization](https://arxiv.org/abs/1711.05101). Parameters: params (`Iterable[nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (`float`, *optional*, defaults to 0.001): The learning rate to use. betas (`Tuple[float,float]`, *optional*, defaults to `(0.9, 0.999)`): Adam's betas parameters (b1, b2). eps (`float`, *optional*, defaults to 1e-06): Adam's epsilon for numerical stability. weight_decay (`float`, *optional*, defaults to 0.0): Decoupled weight decay to apply. correct_bias (`bool`, *optional*, defaults to `True`): Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`). no_deprecation_warning (`bool`, *optional*, defaults to `False`): A flag used to disable the deprecation warning (set to `True` to disable the warning). """ def __init__( self, params: Iterable[nn.parameter.Parameter], lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-6, weight_decay: float = 0.0, correct_bias: bool = True, no_deprecation_warning: bool = False, ): if not no_deprecation_warning: warnings.warn( "This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch" " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this" " warning", FutureWarning, ) require_version("torch>=1.5.0") # add_ with alpha if lr < 0.0: raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0") defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias} super().__init__(params, defaults) @torch.no_grad() def step(self, closure: Callable = None): """ Performs a single optimization step. 
Arguments: closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1)) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = exp_avg_sq.sqrt().add_(group["eps"]) step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group["weight_decay"] > 0.0: p.add_(p, alpha=(-group["lr"] * group["weight_decay"])) return loss class Adafactor(Optimizer): """ AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Arguments: params (`Iterable[nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (`float`, *optional*): The external learning rate. 
eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`): Regularization constants for square gradient and parameter scale respectively clip_threshold (`float`, *optional*, defaults to 1.0): Threshold of root mean square of final gradient update decay_rate (`float`, *optional*, defaults to -0.8): Coefficient used to compute running averages of square beta1 (`float`, *optional*): Coefficient used for computing running averages of gradient weight_decay (`float`, *optional*, defaults to 0.0): Weight decay (L2 penalty) scale_parameter (`bool`, *optional*, defaults to `True`): If True, learning rate is scaled by root mean square relative_step (`bool`, *optional*, defaults to `True`): If True, time-dependent learning rate is computed instead of external learning rate warmup_init (`bool`, *optional*, defaults to `False`): Time-dependent learning rate computation depends on whether warm-up initialization is being used This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3): - Training without LR warmup or clip_threshold is not recommended. - use scheduled LR warm-up to fixed LR - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235) - Disable relative updates - Use scale_parameter=False - Additional optimizer operations like gradient clipping should not be used alongside Adafactor Example: ```python Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3) ``` Others reported the following combination to work well: ```python Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) ``` When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`] scheduler as following: ```python from transformers.optimization import Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler)) ``` Usage: ```python # replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, ) ```""" def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): require_version("torch>=1.5.0") # add_ with alpha if lr is not None and relative_step: raise ValueError("Cannot combine manual `lr` and `relative_step=True` options") if warmup_init and not relative_step: raise ValueError("`warmup_init=True` requires `relative_step=True`") defaults = { "lr": lr, "eps": eps, "clip_threshold": clip_threshold, "decay_rate": decay_rate, "beta1": beta1, "weight_decay": weight_decay, "scale_parameter": scale_parameter, "relative_step": relative_step, "warmup_init": warmup_init, } super().__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return 
param_scale * rel_step_sz @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) @staticmethod def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): # copy from fairseq's adafactor implementation: # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505 r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) @torch.no_grad() def step(self, closure=None): """ Performs a single optimization step Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) lr = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad**2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) update.mul_(lr) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"])) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr)) p_data_fp32.add_(-update) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_data_fp32) return loss class AdafactorSchedule(LambdaLR): """ Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g., for logging), this class creates a proxy object that retrieves the current lr values from the optimizer. 
It returns `initial_lr` during startup and the actual `lr` during stepping. """ def __init__(self, optimizer, initial_lr=0.0): def lr_lambda(_): return initial_lr for group in optimizer.param_groups: group["initial_lr"] = initial_lr super().__init__(optimizer, lr_lambda) for group in optimizer.param_groups: del group["initial_lr"] def get_lr(self): opt = self.optimizer lrs = [ opt._get_lr(group, opt.state[group["params"][0]]) for group in opt.param_groups if group["params"][0].grad is not None ] if len(lrs) == 0: lrs = self.base_lrs # if called before stepping return lrs def get_adafactor_schedule(optimizer, initial_lr=0.0): """ Get a proxy schedule for [`~optimization.Adafactor`] Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. initial_lr (`float`, *optional*, defaults to 0.0): Initial lr Return: [`~optimization.Adafactor`] proxy schedule object. """ return AdafactorSchedule(optimizer, initial_lr)
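A short sketch of how these pieces are typically wired together through the unified `get_scheduler` API. The tiny linear model, the `"linear"` schedule choice, and the step counts below are placeholders for a real training setup:

```python
import torch

from transformers import get_scheduler

model = torch.nn.Linear(16, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

num_training_steps = 1000
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=100,
    num_training_steps=num_training_steps,
)

for step in range(num_training_steps):
    # forward/backward on dummy data stands in for a real training step
    loss = model(torch.randn(8, 16)).sum()
    loss.backward()
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad()
```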
transformers/src/transformers/optimization.py/0
{ "file_path": "transformers/src/transformers/optimization.py", "repo_id": "transformers", "token_count": 13951 }
368
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import ChunkPipeline, build_pipeline_init_args


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(
    build_pipeline_init_args(has_image_processor=True),
    r"""
        points_per_batch (*optional*, int, defaults to 64):
            Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU
            memory.
        output_bboxes_mask (`bool`, *optional*, defaults to `False`):
            Whether or not to output the bounding box predictions.
        output_rle_masks (`bool`, *optional*, defaults to `False`):
            Whether or not to output the masks in `RLE` format""",
)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation for images using `SamForMaskGeneration`. This pipeline predicts binary masks for an
    image. It is a `ChunkPipeline` because the points can be separated into mini-batches in order to avoid OOM
    issues. Use the `points_per_batch` argument to control the number of points that will be processed at the same
    time. The default is `64`.

    The pipeline works in 3 steps:
        1. `preprocess`: A grid of 1024 evenly separated points is generated along with bounding boxes and point
           labels. For more details on how the points and bounding boxes are created, check the
           `_generate_crop_boxes` function. The image is also preprocessed using the `image_processor`. This function
           `yields` a minibatch of `points_per_batch`.

        2. `forward`: feeds the outputs of `preprocess` to the model. The image embedding is computed only once.
           Calls both `self.model.get_image_embeddings` and makes sure that the gradients are not computed, and that
           the tensors and models are on the same device.

        3. `postprocess`: The most important part of the automatic mask generation happens here. Three steps are
           performed:
                - image_processor.postprocess_masks (run on each minibatch loop): takes in the raw output masks,
                  resizes them according to the image size, and transforms them to binary masks.
                - image_processor.filter_masks (on each minibatch loop): uses both `pred_iou_thresh` and
                  `stability_scores`. Also applies a variety of filters based on non-maximum suppression to remove
                  bad masks.
                - image_processor.postprocess_masks_for_amg applies NMS (non-maximum suppression) on the masks to
                  only keep relevant ones.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
    >>> outputs = generator(
    ...     "http://images.cocodataset.org/val2017/000000039769.jpg",
    ... )

    >>> outputs = generator(
    ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", points_per_batch=128
    ... )
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"mask-generation"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=mask-generation).
""" def __init__(self, **kwargs): super().__init__(**kwargs) requires_backends(self, "vision") requires_backends(self, "torch") if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch.") self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING_NAMES) def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} postprocess_kwargs = {} forward_params = {} # preprocess args if "points_per_batch" in kwargs: preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"] if "points_per_crop" in kwargs: preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"] if "timeout" in kwargs: preprocess_kwargs["timeout"] = kwargs["timeout"] # postprocess args if "pred_iou_thresh" in kwargs: forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: forward_params["stability_score_offset"] = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: forward_params["mask_threshold"] = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs): """ Generates binary segmentation masks Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): Image or list of images. mask_threshold (`float`, *optional*, defaults to 0.0): Threshold to use when turning the predicted masks into binary values. pred_iou_thresh (`float`, *optional*, defaults to 0.88): A filtering threshold in `[0,1]` applied on the model's predicted mask quality. stability_score_thresh (`float`, *optional*, defaults to 0.95): A filtering threshold in `[0,1]`, using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (`int`, *optional*, defaults to 1): The amount to shift the cutoff when calculated the stability score. crops_nms_thresh (`float`, *optional*, defaults to 0.7): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (`int`, *optional*, defaults to 0): If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_overlap_ratio (`float`, *optional*, defaults to `512 / 1500`): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (`int`, *optional*, defaults to `1`): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. 
If None, no timeout is set and the call may block forever. Return: `Dict`: A dictionary with the following keys: - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape `(width, height)` of the original image. Returns a mask filled with zeros if no object is found. - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the "object" described by the label and the mask. """ return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs) def preprocess( self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, timeout: Optional[float] = None, ): image = load_image(image, timeout=timeout) target_size = self.image_processor.size["longest_edge"] crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes( image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor ) model_inputs = self.image_processor(images=cropped_images, return_tensors="pt") with self.device_placement(): if self.framework == "pt": inference_context = self.get_inference_context() with inference_context(): model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values")) model_inputs["image_embeddings"] = image_embeddings n_points = grid_points.shape[1] points_per_batch = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None" ) for i in range(0, n_points, points_per_batch): batched_points = grid_points[:, i : i + points_per_batch, :, :] labels = input_labels[:, i : i + points_per_batch] is_last = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _forward( self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): input_boxes = model_inputs.pop("input_boxes") is_last = model_inputs.pop("is_last") original_sizes = model_inputs.pop("original_sizes").tolist() reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist() model_outputs = self.model(**model_inputs) # post processing happens here in order to avoid CPU GPU copies of ALL the masks low_resolution_masks = model_outputs["pred_masks"] masks = self.image_processor.post_process_masks( low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False ) iou_scores = model_outputs["iou_scores"] masks, iou_scores, boxes = self.image_processor.filter_masks( masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def postprocess( self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ): all_scores = [] all_masks = [] all_boxes = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores")) all_masks.extend(model_output.pop("masks")) all_boxes.append(model_output.pop("boxes")) all_scores = torch.cat(all_scores) all_boxes = 
torch.cat(all_boxes) output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation( all_masks, all_scores, all_boxes, crops_nms_thresh ) extra = defaultdict(list) for output in model_outputs: for k, v in output.items(): extra[k].append(v) optional = {} if output_rle_mask: optional["rle_mask"] = rle_mask if output_bboxes_mask: optional["bounding_boxes"] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
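A hedged sketch of how the optional post-processing flags routed through `_sanitize_parameters` and `postprocess` surface to the caller. It assumes the `facebook/sam-vit-base` checkpoint and the example COCO image URL are reachable; the threshold values are illustrative:

```python
from transformers import pipeline

generator = pipeline(task="mask-generation", model="facebook/sam-vit-base")

outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,
    pred_iou_thresh=0.9,
    output_rle_mask=True,
    output_bboxes_mask=True,
)

masks = outputs["masks"]           # binary masks kept after filtering and NMS
scores = outputs["scores"]         # matching IoU scores
rle = outputs["rle_mask"]          # present because output_rle_mask=True
boxes = outputs["bounding_boxes"]  # present because output_bboxes_mask=True
```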
transformers/src/transformers/pipelines/mask_generation.py/0
{ "file_path": "transformers/src/transformers/pipelines/mask_generation.py", "repo_id": "transformers", "token_count": 5619 }
369
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processing saving/loading class for common processors. """ import copy import inspect import json import os import warnings from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from .dynamic_module_utils import custom_object_save from .tokenization_utils_base import PreTrainedTokenizerBase from .utils import ( PROCESSOR_NAME, PushToHubMixin, add_model_info_to_auto_map, cached_file, copy_func, direct_transformers_import, download_url, is_offline_mode, is_remote_url, logging, ) logger = logging.get_logger(__name__) # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. transformers_module = direct_transformers_import(Path(__file__).parent) AUTO_TO_BASE_CLASS_MAPPING = { "AutoTokenizer": "PreTrainedTokenizerBase", "AutoFeatureExtractor": "FeatureExtractionMixin", "AutoImageProcessor": "ImageProcessingMixin", } class ProcessorMixin(PushToHubMixin): """ This is a mixin used to provide saving/loading functionality for all processor classes. """ attributes = ["feature_extractor", "tokenizer"] # Names need to be attr_class for attr in attributes feature_extractor_class = None tokenizer_class = None _auto_class = None # args have to match the attributes class attribute def __init__(self, *args, **kwargs): # Sanitize args and kwargs for key in kwargs: if key not in self.attributes: raise TypeError(f"Unexpected keyword argument {key}.") for arg, attribute_name in zip(args, self.attributes): if attribute_name in kwargs: raise TypeError(f"Got multiple values for argument {attribute_name}.") else: kwargs[attribute_name] = arg if len(kwargs) != len(self.attributes): raise ValueError( f"This processor requires {len(self.attributes)} arguments: {', '.join(self.attributes)}. Got " f"{len(args)} arguments instead." ) # Check each arg is of the proper class (this will also catch a user initializing in the wrong order) for attribute_name, arg in kwargs.items(): class_name = getattr(self, f"{attribute_name}_class") # Nothing is ever going to be an instance of "AutoXxx", in that case we check the base class. class_name = AUTO_TO_BASE_CLASS_MAPPING.get(class_name, class_name) if isinstance(class_name, tuple): proper_class = tuple(getattr(transformers_module, n) for n in class_name if n is not None) else: proper_class = getattr(transformers_module, class_name) if not isinstance(arg, proper_class): raise ValueError( f"Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected." ) setattr(self, attribute_name, arg) def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this processor instance. """ output = copy.deepcopy(self.__dict__) # Get the kwargs in `__init__`. sig = inspect.signature(self.__init__) # Only save the attributes that are presented in the kwargs of `__init__`. 
attrs_to_save = sig.parameters # Don't save attributes like `tokenizer`, `image processor` etc. attrs_to_save = [x for x in attrs_to_save if x not in self.__class__.attributes] # extra attributes to be kept attrs_to_save += ["auto_map"] output = {k: v for k, v in output.items() if k in attrs_to_save} output["processor_class"] = self.__class__.__name__ if "tokenizer" in output: del output["tokenizer"] if "image_processor" in output: del output["image_processor"] if "feature_extractor" in output: del output["feature_extractor"] # Some attributes have different names but containing objects that are not simple strings output = { k: v for k, v in output.items() if not (isinstance(v, PushToHubMixin) or v.__class__.__name__ == "BeamSearchDecoderCTC") } return output def to_json_string(self) -> str: """ Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. """ dictionary = self.to_dict() return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this processor instance's parameters will be saved. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) def __repr__(self): attributes_repr = [f"- {name}: {repr(getattr(self, name))}" for name in self.attributes] attributes_repr = "\n".join(attributes_repr) return f"{self.__class__.__name__}:\n{attributes_repr}\n\n{self.to_json_string()}" def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs): """ Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it can be reloaded using the [`~ProcessorMixin.from_pretrained`] method. <Tip> This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`]. Please refer to the docstrings of the methods above for more information. </Tip> Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) kwargs["token"] = use_auth_token os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: attrs = [getattr(self, attribute_name) for attribute_name in self.attributes] configs = [(a.init_kwargs if isinstance(a, PreTrainedTokenizerBase) else a) for a in attrs] configs.append(self) custom_object_save(self, save_directory, config=configs) for attribute_name in self.attributes: attribute = getattr(self, attribute_name) # Include the processor class in the attribute config so this processor can then be reloaded with the # `AutoProcessor` API. if hasattr(attribute, "_set_processor_class"): attribute._set_processor_class(self.__class__.__name__) attribute.save_pretrained(save_directory) if self._auto_class is not None: # We added an attribute to the init_kwargs of the tokenizers, which needs to be cleaned up. for attribute_name in self.attributes: attribute = getattr(self, attribute_name) if isinstance(attribute, PreTrainedTokenizerBase): del attribute.init_kwargs["auto_map"] # If we save using the predefined names, we can load using `from_pretrained` output_processor_file = os.path.join(save_directory, PROCESSOR_NAME) # For now, let's not save to `processor_config.json` if the processor doesn't have extra attributes and # `auto_map` is not specified. if set(self.to_dict().keys()) != {"processor_class"}: self.to_json_file(output_processor_file) logger.info(f"processor saved in {output_processor_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) if set(self.to_dict().keys()) == {"processor_class"}: return [] return [output_processor_file] @classmethod def get_processor_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a processor of type [`~processing_utils.ProcessingMixin`] using `from_args_and_dict`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. Returns: `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the processor object. 
""" cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", "") from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "processor", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): processor_file = os.path.join(pretrained_model_name_or_path, PROCESSOR_NAME) if os.path.isfile(pretrained_model_name_or_path): resolved_processor_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): processor_file = pretrained_model_name_or_path resolved_processor_file = download_url(pretrained_model_name_or_path) else: processor_file = PROCESSOR_NAME try: # Load from local folder or from cache or download from model Hub and cache resolved_processor_file = cached_file( pretrained_model_name_or_path, processor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load processor for '{pretrained_model_name_or_path}'. If you were trying to load" " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a {PROCESSOR_NAME} file" ) # Existing processors on the Hub created before #27761 being merged don't have `processor_config.json` (if not # updated afterward), and we need to keep `from_pretrained` work. So here it fallbacks to the empty dict. # (`cached_file` called using `_raise_exceptions_for_missing_entries=False` to avoid exception) # However, for models added in the future, we won't get the expected error if this file is missing. if resolved_processor_file is None: return {}, kwargs try: # Load processor dict with open(resolved_processor_file, "r", encoding="utf-8") as reader: text = reader.read() processor_dict = json.loads(text) except json.JSONDecodeError: raise EnvironmentError( f"It looks like the config file at '{resolved_processor_file}' is not a valid JSON file." 
) if is_local: logger.info(f"loading configuration file {resolved_processor_file}") else: logger.info(f"loading configuration file {processor_file} from cache at {resolved_processor_file}") if "auto_map" in processor_dict and not is_local: processor_dict["auto_map"] = add_model_info_to_auto_map( processor_dict["auto_map"], pretrained_model_name_or_path ) return processor_dict, kwargs @classmethod def from_args_and_dict(cls, args, processor_dict: Dict[str, Any], **kwargs): """ Instantiates a type of [`~processing_utils.ProcessingMixin`] from a Python dictionary of parameters. Args: processor_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the processor object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~processing_utils.ProcessingMixin.to_dict`] method. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the processor object. Returns: [`~processing_utils.ProcessingMixin`]: The processor object instantiated from those parameters. """ processor_dict = processor_dict.copy() return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # Unlike image processors or feature extractors whose `__init__` accept `kwargs`, processor don't have `kwargs`. # We have to pop up some unused (but specific) arguments to make it work. if "processor_class" in processor_dict: del processor_dict["processor_class"] if "auto_map" in processor_dict: del processor_dict["auto_map"] processor = cls(*args, **processor_dict) # Update processor with kwargs if needed for key in set(kwargs.keys()): if hasattr(processor, key): setattr(processor, key, kwargs.pop(key)) logger.info(f"Processor {processor}") if return_unused_kwargs: return processor, kwargs else: return processor @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ): r""" Instantiate a processor associated with a pretrained model. <Tip> This class method is simply calling the feature extractor [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], image processor [`~image_processing_utils.ImageProcessingMixin`] and the tokenizer [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] methods. Please refer to the docstrings of the methods above for more information. </Tip> Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a feature extractor file saved using the [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. **kwargs Additional keyword arguments passed along to both [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`]. 
""" kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) processor_dict, kwargs = cls.get_processor_dict(pretrained_model_name_or_path, **kwargs) return cls.from_args_and_dict(args, processor_dict, **kwargs) @classmethod def register_for_auto_class(cls, auto_class="AutoProcessor"): """ Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoProcessor`. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoProcessor"`): The auto class to register this new feature extractor with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class @classmethod def _get_arguments_from_pretrained(cls, pretrained_model_name_or_path, **kwargs): args = [] for attribute_name in cls.attributes: class_name = getattr(cls, f"{attribute_name}_class") if isinstance(class_name, tuple): classes = tuple(getattr(transformers_module, n) if n is not None else None for n in class_name) use_fast = kwargs.get("use_fast", True) if use_fast and classes[1] is not None: attribute_class = classes[1] else: attribute_class = classes[0] else: attribute_class = getattr(transformers_module, class_name) args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs)) return args @property def model_input_names(self): first_attribute = getattr(self, self.attributes[0]) return getattr(first_attribute, "model_input_names", None) ProcessorMixin.push_to_hub = copy_func(ProcessorMixin.push_to_hub) if ProcessorMixin.push_to_hub.__doc__ is not None: ProcessorMixin.push_to_hub.__doc__ = ProcessorMixin.push_to_hub.__doc__.format( object="processor", object_class="AutoProcessor", object_files="processor files" )
transformers/src/transformers/processing_utils.py/0
{ "file_path": "transformers/src/transformers/processing_utils.py", "repo_id": "transformers", "token_count": 9681 }
370
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Time series distributional output classes and utilities. """ from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class AffineTransformed(TransformedDistribution): def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0): self.scale = 1.0 if scale is None else scale self.loc = 0.0 if loc is None else loc super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]) @property def mean(self): """ Returns the mean of the distribution. """ return self.base_dist.mean * self.scale + self.loc @property def variance(self): """ Returns the variance of the distribution. """ return self.base_dist.variance * self.scale**2 @property def stddev(self): """ Returns the standard deviation of the distribution. """ return self.variance.sqrt() class ParameterProjection(nn.Module): def __init__( self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs ) -> None: super().__init__(**kwargs) self.args_dim = args_dim self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) self.domain_map = domain_map def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: params_unbounded = [proj(x) for proj in self.proj] return self.domain_map(*params_unbounded) class LambdaLayer(nn.Module): def __init__(self, function): super().__init__() self.function = function def forward(self, x, *args): return self.function(x, *args) class DistributionOutput: distribution_class: type in_features: int args_dim: Dict[str, int] def __init__(self, dim: int = 1) -> None: self.dim = dim self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim} def _base_distribution(self, distr_args): if self.dim == 1: return self.distribution_class(*distr_args) else: return Independent(self.distribution_class(*distr_args), 1) def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution: distr = self._base_distribution(distr_args) if loc is None and scale is None: return distr else: return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim) @property def event_shape(self) -> Tuple: r""" Shape of each individual event contemplated by the distributions that this object constructs. """ return () if self.dim == 1 else (self.dim,) @property def event_dim(self) -> int: r""" Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object constructs. """ return len(self.event_shape) @property def value_in_support(self) -> float: r""" A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. 
By default 0.0. This value will be used when padding data series. """ return 0.0 def get_parameter_projection(self, in_features: int) -> nn.Module: r""" Return the parameter projection layer that maps the input to the appropriate parameters of the distribution. """ return ParameterProjection( in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), ) def domain_map(self, *args: torch.Tensor): r""" Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a distribution of the right event_shape. """ raise NotImplementedError() @staticmethod def squareplus(x: torch.Tensor) -> torch.Tensor: r""" Helper to map inputs to the positive orthant by applying the square-plus operation. Reference: https://twitter.com/jon_barron/status/1387167648669048833 """ return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0 class StudentTOutput(DistributionOutput): """ Student-T distribution output class. """ args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} distribution_class: type = StudentT @classmethod def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) df = 2.0 + cls.squareplus(df) return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) class NormalOutput(DistributionOutput): """ Normal distribution output class. """ args_dim: Dict[str, int] = {"loc": 1, "scale": 1} distribution_class: type = Normal @classmethod def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) return loc.squeeze(-1), scale.squeeze(-1) class NegativeBinomialOutput(DistributionOutput): """ Negative Binomial distribution output class. """ args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} distribution_class: type = NegativeBinomial @classmethod def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): total_count = cls.squareplus(total_count) return total_count.squeeze(-1), logits.squeeze(-1) def _base_distribution(self, distr_args) -> Distribution: total_count, logits = distr_args if self.dim == 1: return self.distribution_class(total_count=total_count, logits=logits) else: return Independent(self.distribution_class(total_count=total_count, logits=logits), 1) # Overwrites the parent class method. We cannot scale using the affine # transformation since negative binomial should return integers. Instead # we scale the parameters. def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None ) -> Distribution: total_count, logits = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits))
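For readers unfamiliar with these helpers, the pieces above compose as follows: a `DistributionOutput` turns the trailing feature dimension of a model into distribution parameters via `get_parameter_projection`, and `distribution` builds the corresponding `torch.distributions` object. A small sketch; the batch size and feature size are arbitrary illustration values:

```py
# Project arbitrary features to Student-T parameters and sample from the result.
import torch

from transformers.time_series_utils import StudentTOutput

distr_output = StudentTOutput(dim=1)
projection = distr_output.get_parameter_projection(in_features=16)

features = torch.randn(8, 16)             # e.g. decoder hidden states
distr_args = projection(features)         # (df, loc, scale) after domain_map
distribution = distr_output.distribution(distr_args)

samples = distribution.sample()           # shape: (8,)
nll = -distribution.log_prob(samples)     # per-sample negative log-likelihood
```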
transformers/src/transformers/time_series_utils.py/0
{ "file_path": "transformers/src/transformers/time_series_utils.py", "repo_id": "transformers", "token_count": 2916 }
371
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class TextClassificationTool(PipelineTool): """ Example: ```py from transformers.tools import TextClassificationTool classifier = TextClassificationTool() classifier("This is a super nice API!", labels=["positive", "negative"]) ``` """ default_checkpoint = "facebook/bart-large-mnli" description = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) name = "text_classifier" pre_processor_class = AutoTokenizer model_class = AutoModelForSequenceClassification inputs = ["text", ["text"]] outputs = ["text"] def setup(self): super().setup() config = self.model.config self.entailment_id = -1 for idx, label in config.id2label.items(): if label.lower().startswith("entail"): self.entailment_id = int(idx) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.") def encode(self, text, labels): self._labels = labels return self.pre_processor( [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", ) def decode(self, outputs): logits = outputs.logits label_id = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
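The tool above relies on the standard NLI trick: the text is paired with a hypothesis of the form "This example is {label}" and the label whose entailment logit is highest wins. The same behaviour is available through the zero-shot-classification pipeline, sketched here with the tool's default checkpoint:

```py
# Equivalent zero-shot classification via the pipeline API, which uses the
# same entailment-based scoring as TextClassificationTool above.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "This is a super nice API!",
    candidate_labels=["positive", "negative"],
    hypothesis_template="This example is {}.",
)
print(result["labels"][0])  # most likely label
```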
transformers/src/transformers/tools/text_classification.py/0
{ "file_path": "transformers/src/transformers/tools/text_classification.py", "repo_id": "transformers", "token_count": 874 }
372
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]
IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5]
OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
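These mean/std triples are what the image processors plug in when `do_normalize=True`. A tiny illustration with plain NumPy; the image shape is arbitrary:

```py
# Normalizing a dummy HWC image with the ImageNet statistics defined above.
import numpy as np

from transformers.utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

image = np.random.rand(224, 224, 3).astype(np.float32)  # dummy image in [0, 1]
normalized = (image - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD
print(normalized.shape)  # (224, 224, 3)
```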
transformers/src/transformers/utils/constants.py/0
{ "file_path": "transformers/src/transformers/utils/constants.py", "repo_id": "transformers", "token_count": 162 }
373
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Generic utilities """ import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields, is_dataclass from enum import Enum from functools import partial from typing import Any, ContextManager, Iterable, List, Tuple import numpy as np from packaging import version from .import_utils import get_torch_version, is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class cached_property(property): """ Descriptor that mimics @property but caches output in member variable. From tensorflow_datasets Built-in in functools from Python 3.8. """ def __get__(self, obj, objtype=None): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") attr = "__cached_" + self.fget.__name__ cached = getattr(obj, attr, None) if cached is None: cached = self.fget(obj) setattr(obj, attr, cached) return cached # vendored from distutils.util def strtobool(val): """Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. """ val = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"invalid truth value {val!r}") def infer_framework_from_repr(x): """ Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the frameworks in a smart order, without the need to import the frameworks). """ representation = str(type(x)) if representation.startswith("<class 'torch."): return "pt" elif representation.startswith("<class 'tensorflow."): return "tf" elif representation.startswith("<class 'jax"): return "jax" elif representation.startswith("<class 'numpy."): return "np" def _get_frameworks_and_test_func(x): """ Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework we can guess from the repr first, then Numpy, then the others. """ framework_to_test = { "pt": is_torch_tensor, "tf": is_tf_tensor, "jax": is_jax_tensor, "np": is_numpy_array, } preferred_framework = infer_framework_from_repr(x) # We will test this one first, then numpy, then the others. 
frameworks = [] if preferred_framework is None else [preferred_framework] if preferred_framework != "np": frameworks.append("np") frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, "np"]]) return {f: framework_to_test[f] for f in frameworks} def is_tensor(x): """ Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray` in the order defined by `infer_framework_from_repr` """ # This gives us a smart order to test the frameworks with the corresponding tests. framework_to_test_func = _get_frameworks_and_test_func(x) for test_func in framework_to_test_func.values(): if test_func(x): return True # Tracers if is_torch_fx_proxy(x): return True if is_flax_available(): from jax.core import Tracer if isinstance(x, Tracer): return True return False def _is_numpy(x): return isinstance(x, np.ndarray) def is_numpy_array(x): """ Tests if `x` is a numpy array or not. """ return _is_numpy(x) def _is_torch(x): import torch return isinstance(x, torch.Tensor) def is_torch_tensor(x): """ Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed. """ return False if not is_torch_available() else _is_torch(x) def _is_torch_device(x): import torch return isinstance(x, torch.device) def is_torch_device(x): """ Tests if `x` is a torch device or not. Safe to call even if torch is not installed. """ return False if not is_torch_available() else _is_torch_device(x) def _is_torch_dtype(x): import torch if isinstance(x, str): if hasattr(torch, x): x = getattr(torch, x) else: return False return isinstance(x, torch.dtype) def is_torch_dtype(x): """ Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed. """ return False if not is_torch_available() else _is_torch_dtype(x) def _is_tensorflow(x): import tensorflow as tf return isinstance(x, tf.Tensor) def is_tf_tensor(x): """ Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed. """ return False if not is_tf_available() else _is_tensorflow(x) def _is_tf_symbolic_tensor(x): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(tf, "is_symbolic_tensor"): return tf.is_symbolic_tensor(x) return type(x) == tf.Tensor def is_tf_symbolic_tensor(x): """ Tests if `x` is a tensorflow symbolic tensor or not (ie. not eager). Safe to call even if tensorflow is not installed. """ return False if not is_tf_available() else _is_tf_symbolic_tensor(x) def _is_jax(x): import jax.numpy as jnp # noqa: F811 return isinstance(x, jnp.ndarray) def is_jax_tensor(x): """ Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed. """ return False if not is_flax_available() else _is_jax(x) def to_py_obj(obj): """ Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list. """ framework_to_py_obj = { "pt": lambda obj: obj.detach().cpu().tolist(), "tf": lambda obj: obj.numpy().tolist(), "jax": lambda obj: np.asarray(obj).tolist(), "np": lambda obj: obj.tolist(), } if isinstance(obj, (dict, UserDict)): return {k: to_py_obj(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return [to_py_obj(o) for o in obj] # This gives us a smart order to test the frameworks with the corresponding tests. 
framework_to_test_func = _get_frameworks_and_test_func(obj) for framework, test_func in framework_to_test_func.items(): if test_func(obj): return framework_to_py_obj[framework](obj) # tolist also works on 0d np arrays if isinstance(obj, np.number): return obj.tolist() else: return obj def to_numpy(obj): """ Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array. """ framework_to_numpy = { "pt": lambda obj: obj.detach().cpu().numpy(), "tf": lambda obj: obj.numpy(), "jax": lambda obj: np.asarray(obj), "np": lambda obj: obj, } if isinstance(obj, (dict, UserDict)): return {k: to_numpy(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return np.array(obj) # This gives us a smart order to test the frameworks with the corresponding tests. framework_to_test_func = _get_frameworks_and_test_func(obj) for framework, test_func in framework_to_test_func.items(): if test_func(obj): return framework_to_numpy[framework](obj) return obj class ModelOutput(OrderedDict): """ Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular python dictionary. <Tip warning={true}> You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple before. </Tip> """ def __init_subclass__(cls) -> None: """Register subclasses as pytree nodes. This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with `static_graph=True` with modules that output `ModelOutput` subclasses. """ if is_torch_available(): if version.parse(get_torch_version()) >= version.parse("2.2"): _torch_pytree.register_pytree_node( cls, _model_output_flatten, partial(_model_output_unflatten, output_type=cls), serialized_type_name=f"{cls.__module__}.{cls.__name__}", ) else: _torch_pytree._register_pytree_node( cls, _model_output_flatten, partial(_model_output_unflatten, output_type=cls), ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Subclasses of ModelOutput must use the @dataclass decorator # This check is done in __init__ because the @dataclass decorator operates after __init_subclass__ # issubclass() would return True for issubclass(ModelOutput, ModelOutput) when False is needed # Just need to check that the current class is not ModelOutput is_modeloutput_subclass = self.__class__ != ModelOutput if is_modeloutput_subclass and not is_dataclass(self): raise TypeError( f"{self.__module__}.{self.__class__.__name__} is not a dataclasss." " This is a subclass of ModelOutput and so must use the @dataclass decorator." ) def __post_init__(self): """Check the ModelOutput dataclass. Only occurs if @dataclass decorator has been used. 
""" class_fields = fields(self) # Safety and consistency checks if not len(class_fields): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(first_field): if isinstance(first_field, dict): iterator = first_field.items() first_field_iterator = True else: try: iterator = iter(first_field) first_field_iterator = True except TypeError: first_field_iterator = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(iterator): if ( not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute self[class_fields[0].name] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." ) break setattr(self, element[0], element[1]) if element[1] is not None: self[element[0]] = element[1] elif first_field is not None: self[class_fields[0].name] = first_field else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k): if isinstance(k, str): inner_dict = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name, value): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): # Will raise a KeyException if needed super().__setitem__(key, value) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(key, value) def __reduce__(self): if not is_dataclass(self): return super().__reduce__() callable, _args, *remaining = super().__reduce__() args = tuple(getattr(self, field.name) for field in fields(self)) return callable, args, *remaining def to_tuple(self) -> Tuple[Any]: """ Convert self to a tuple containing all the attributes/keys that are not `None`. 
""" return tuple(self[k] for k in self.keys()) if is_torch_available(): import torch.utils._pytree as _torch_pytree def _model_output_flatten(output: ModelOutput) -> Tuple[List[Any], "_torch_pytree.Context"]: return list(output.values()), list(output.keys()) def _model_output_unflatten( values: Iterable[Any], context: "_torch_pytree.Context", output_type=None, ) -> ModelOutput: return output_type(**dict(zip(context, values))) if version.parse(get_torch_version()) >= version.parse("2.2"): _torch_pytree.register_pytree_node( ModelOutput, _model_output_flatten, partial(_model_output_unflatten, output_type=ModelOutput), serialized_type_name=f"{ModelOutput.__module__}.{ModelOutput.__name__}", ) else: _torch_pytree._register_pytree_node( ModelOutput, _model_output_flatten, partial(_model_output_unflatten, output_type=ModelOutput), ) class ExplicitEnum(str, Enum): """ Enum with more explicit error message for missing values. """ @classmethod def _missing_(cls, value): raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}" ) class PaddingStrategy(ExplicitEnum): """ Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ LONGEST = "longest" MAX_LENGTH = "max_length" DO_NOT_PAD = "do_not_pad" class TensorType(ExplicitEnum): """ Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ PYTORCH = "pt" TENSORFLOW = "tf" NUMPY = "np" JAX = "jax" class ContextManagers: """ Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers` in the `fastcore` library. """ def __init__(self, context_managers: List[ContextManager]): self.context_managers = context_managers self.stack = ExitStack() def __enter__(self): for context_manager in self.context_managers: self.stack.enter_context(context_manager) def __exit__(self, *args, **kwargs): self.stack.__exit__(*args, **kwargs) def can_return_loss(model_class): """ Check if a given model can return loss. Args: model_class (`type`): The class of the model. """ framework = infer_framework(model_class) if framework == "tf": signature = inspect.signature(model_class.call) # TensorFlow models elif framework == "pt": signature = inspect.signature(model_class.forward) # PyTorch models else: signature = inspect.signature(model_class.__call__) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def find_labels(model_class): """ Find the labels used by a given model. Args: model_class (`type`): The class of the model. 
""" model_name = model_class.__name__ framework = infer_framework(model_class) if framework == "tf": signature = inspect.signature(model_class.call) # TensorFlow models elif framework == "pt": signature = inspect.signature(model_class.forward) # PyTorch models else: signature = inspect.signature(model_class.__call__) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."): """Flatten a nested dict into a single level dict.""" def _flatten_dict(d, parent_key="", delimiter="."): for k, v in d.items(): key = str(parent_key) + delimiter + str(k) if parent_key else k if v and isinstance(v, MutableMapping): yield from flatten_dict(v, key, delimiter=delimiter).items() else: yield key, v return dict(_flatten_dict(d, parent_key, delimiter)) @contextmanager def working_or_temp_dir(working_dir, use_temp_dir: bool = False): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def transpose(array, axes=None): """ Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.transpose(array, axes=axes) elif is_torch_tensor(array): return array.T if axes is None else array.permute(*axes) elif is_tf_tensor(array): import tensorflow as tf return tf.transpose(array, perm=axes) elif is_jax_tensor(array): return jnp.transpose(array, axes=axes) else: raise ValueError(f"Type not supported for transpose: {type(array)}.") def reshape(array, newshape): """ Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.reshape(array, newshape) elif is_torch_tensor(array): return array.reshape(*newshape) elif is_tf_tensor(array): import tensorflow as tf return tf.reshape(array, newshape) elif is_jax_tensor(array): return jnp.reshape(array, newshape) else: raise ValueError(f"Type not supported for reshape: {type(array)}.") def squeeze(array, axis=None): """ Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.squeeze(array, axis=axis) elif is_torch_tensor(array): return array.squeeze() if axis is None else array.squeeze(dim=axis) elif is_tf_tensor(array): import tensorflow as tf return tf.squeeze(array, axis=axis) elif is_jax_tensor(array): return jnp.squeeze(array, axis=axis) else: raise ValueError(f"Type not supported for squeeze: {type(array)}.") def expand_dims(array, axis): """ Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.expand_dims(array, axis) elif is_torch_tensor(array): return array.unsqueeze(dim=axis) elif is_tf_tensor(array): import tensorflow as tf return tf.expand_dims(array, axis=axis) elif is_jax_tensor(array): return jnp.expand_dims(array, axis=axis) else: raise ValueError(f"Type not supported for expand_dims: {type(array)}.") def tensor_size(array): """ Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. 
""" if is_numpy_array(array): return np.size(array) elif is_torch_tensor(array): return array.numel() elif is_tf_tensor(array): import tensorflow as tf return tf.size(array) elif is_jax_tensor(array): return array.size else: raise ValueError(f"Type not supported for tensor_size: {type(array)}.") def add_model_info_to_auto_map(auto_map, repo_id): """ Adds the information of the repo_id to a given auto map. """ for key, value in auto_map.items(): if isinstance(value, (tuple, list)): auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: auto_map[key] = f"{repo_id}--{value}" return auto_map def infer_framework(model_class): """ Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant classes are imported or available. """ for base_class in inspect.getmro(model_class): module = base_class.__module__ name = base_class.__name__ if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch") or name == "PreTrainedModel": return "pt" elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"Could not infer framework from class {model_class}.")
transformers/src/transformers/utils/generic.py/0
{ "file_path": "transformers/src/transformers/utils/generic.py", "repo_id": "transformers", "token_count": 9888 }
374
{ "example_name": "text classification", "directory_name": "{{cookiecutter.example_name|lower|replace(' ', '-')}}", "example_shortcut": "{{cookiecutter.directory_name}}", "model_class": "AutoModel", "authors": "The HuggingFace Team", "can_train_from_scratch": ["True", "False"], "with_trainer": ["True", "False"] }
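This file follows the usual cookiecutter convention: each key becomes a template variable and list-valued keys become multiple-choice prompts. A hedged sketch of how the template would be rendered programmatically; the template path and output directory are assumptions for illustration:

```py
# Rendering the example-script template with the cookiecutter API.
# Keys in extra_context mirror the JSON above; anything omitted is prompted
# for interactively unless no_input=True.
from cookiecutter.main import cookiecutter

cookiecutter(
    "templates/adding_a_new_example_script",   # path assumed relative to the repo root
    no_input=True,
    extra_context={"example_name": "text classification", "with_trainer": "True"},
    output_dir="./generated_example",
)
```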
transformers/templates/adding_a_new_example_script/cookiecutter.json/0
{ "file_path": "transformers/templates/adding_a_new_example_script/cookiecutter.json", "repo_id": "transformers", "token_count": 115 }
375
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # {{cookiecutter.modelname}} ## Overview The {{cookiecutter.modelname}} model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>. <INSERT SHORT SUMMARY HERE> The abstract from the paper is the following: *<INSERT PAPER ABSTRACT HERE>* Tips: <INSERT TIPS ABOUT MODEL HERE> This model was contributed by [INSERT YOUR HF USERNAME HERE](<https://huggingface.co/<INSERT YOUR HF USERNAME HERE>). The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>). ## {{cookiecutter.camelcase_modelname}}Config [[autodoc]] {{cookiecutter.camelcase_modelname}}Config ## {{cookiecutter.camelcase_modelname}}Tokenizer [[autodoc]] {{cookiecutter.camelcase_modelname}}Tokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## {{cookiecutter.camelcase_modelname}}TokenizerFast [[autodoc]] {{cookiecutter.camelcase_modelname}}TokenizerFast {% if "PyTorch" in cookiecutter.generate_tensorflow_pytorch_and_flax -%} ## {{cookiecutter.camelcase_modelname}}Model [[autodoc]] {{cookiecutter.camelcase_modelname}}Model - forward {% if cookiecutter.is_encoder_decoder_model == "False" %} ## {{cookiecutter.camelcase_modelname}}ForCausalLM [[autodoc]] {{cookiecutter.camelcase_modelname}}ForCausalLM - forward ## {{cookiecutter.camelcase_modelname}}ForMaskedLM [[autodoc]] {{cookiecutter.camelcase_modelname}}ForMaskedLM - forward ## {{cookiecutter.camelcase_modelname}}ForSequenceClassification [[autodoc]] transformers.{{cookiecutter.camelcase_modelname}}ForSequenceClassification - forward ## {{cookiecutter.camelcase_modelname}}ForMultipleChoice [[autodoc]] transformers.{{cookiecutter.camelcase_modelname}}ForMultipleChoice - forward ## {{cookiecutter.camelcase_modelname}}ForTokenClassification [[autodoc]] transformers.{{cookiecutter.camelcase_modelname}}ForTokenClassification - forward ## {{cookiecutter.camelcase_modelname}}ForQuestionAnswering [[autodoc]] {{cookiecutter.camelcase_modelname}}ForQuestionAnswering - forward {%- else %} ## {{cookiecutter.camelcase_modelname}}ForConditionalGeneration [[autodoc]] {{cookiecutter.camelcase_modelname}}ForConditionalGeneration - forward ## {{cookiecutter.camelcase_modelname}}ForSequenceClassification [[autodoc]] {{cookiecutter.camelcase_modelname}}ForSequenceClassification - forward ## {{cookiecutter.camelcase_modelname}}ForQuestionAnswering [[autodoc]] {{cookiecutter.camelcase_modelname}}ForQuestionAnswering - forward ## {{cookiecutter.camelcase_modelname}}ForCausalLM [[autodoc]] {{cookiecutter.camelcase_modelname}}ForCausalLM - forward {% endif -%} {% endif -%} {% if "TensorFlow" in cookiecutter.generate_tensorflow_pytorch_and_flax -%} ## TF{{cookiecutter.camelcase_modelname}}Model [[autodoc]] TF{{cookiecutter.camelcase_modelname}}Model - call {% if cookiecutter.is_encoder_decoder_model == "False" %} ## TF{{cookiecutter.camelcase_modelname}}ForMaskedLM [[autodoc]] 
TF{{cookiecutter.camelcase_modelname}}ForMaskedLM - call ## TF{{cookiecutter.camelcase_modelname}}ForCausalLM [[autodoc]] TF{{cookiecutter.camelcase_modelname}}ForCausalLM - call ## TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification [[autodoc]] TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification - call ## TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice [[autodoc]] TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice - call ## TF{{cookiecutter.camelcase_modelname}}ForTokenClassification [[autodoc]] TF{{cookiecutter.camelcase_modelname}}ForTokenClassification - call ## TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering [[autodoc]] TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering - call {%- else %} ## TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration [[autodoc]] TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration - call {% endif -%} {% endif -%} {% if "Flax" in cookiecutter.generate_tensorflow_pytorch_and_flax -%} ## Flax{{cookiecutter.camelcase_modelname}}Model [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}Model - call {% if cookiecutter.is_encoder_decoder_model == "False" %} ## Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForMaskedLM - call ## Flax{{cookiecutter.camelcase_modelname}}ForCausalLM [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForCausalLM - call ## Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification - call ## Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForMultipleChoice - call ## Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForTokenClassification - call ## Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering - call {%- else %} ## Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification - call ## Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering - call ## Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration [[autodoc]] Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration - call {% endif -%} {% endif -%}
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/{{cookiecutter.lowercase_modelname}}.md/0
{ "file_path": "transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/{{cookiecutter.lowercase_modelname}}.md", "repo_id": "transformers", "token_count": 2170 }
376
# coding=utf-8 # Copyright 2021 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class AutoImageProcessorTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_image_processor_from_model_shortcut(self): config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32") self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_key(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_feature_extractor_key(self): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_config(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = CLIPConfig() # Create a dummy config file with image_proceesor_type processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) # remove image_processor_type to make sure config.json alone is enough to load image processor locally config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict() config_dict.pop("image_processor_type") config = CLIPImageProcessor(**config_dict) # save in new folder model_config.save_pretrained(tmpdirname) config.save_pretrained(tmpdirname) config = AutoImageProcessor.from_pretrained(tmpdirname) # make sure private variable is not incorrectly saved dict_as_saved = 
json.loads(config.to_json_string()) self.assertTrue("_processor_class" not in dict_as_saved) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_file(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) config = AutoImageProcessor.from_pretrained(processor_tmpfile) self.assertIsInstance(config, CLIPImageProcessor) def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "clip-base is not a local folder and is not a valid model identifier" ): _ = AutoImageProcessor.from_pretrained("clip-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_image_processor_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ): _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model") def test_from_pretrained_dynamic_image_processor(self): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(ValueError): image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor") # If remote code is disabled, we can't load this config. with self.assertRaises(ValueError): image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False ) image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor") def test_new_image_processor_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoImageProcessor.register(CustomConfig, CustomImageProcessor) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(ValueError): AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor) with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) image_processor = CustomImageProcessor.from_pretrained(tmpdirname) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir) self.assertIsInstance(new_image_processor, CustomImageProcessor) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_image_processor_conflict(self): class NewImageProcessor(CLIPImageProcessor): is_local = True try: AutoConfig.register("custom", CustomConfig) AutoImageProcessor.register(CustomConfig, NewImageProcessor) # If remote code is not set, the default is to use local image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor") self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(image_processor.is_local) # If remote code is disabled, we load the local one. image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(image_processor.is_local) # If remote is enabled, we load from the Hub image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(not hasattr(image_processor, "is_local")) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
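Outside of the test harness, the registration flow exercised above boils down to a couple of calls. A condensed sketch with stand-in custom classes (the names are illustrative only):

```py
# Registering a custom image processor so AutoImageProcessor can resolve it.
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)

# From here on, AutoImageProcessor.from_pretrained(...) resolves to
# MyImageProcessor for any checkpoint whose config declares model_type "my-model".
```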
transformers/tests/models/auto/test_image_processing_auto.py/0
{ "file_path": "transformers/tests/models/auto/test_image_processing_auto.py", "repo_id": "transformers", "token_count": 4164 }
377
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import BartTokenizer, BartTokenizerFast, BatchEncoding from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class TestTokenizationBart(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BartTokenizer rust_tokenizer_class = BartTokenizerFast test_rust_tokenizer = True from_pretrained_filter = filter_roberta_detectors # from_pretrained_kwargs = {'add_prefix_space': True} def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" @cached_property def default_tokenizer(self): return BartTokenizer.from_pretrained("facebook/bart-large") @cached_property def default_tokenizer_fast(self): return BartTokenizerFast.from_pretrained("facebook/bart-large") @require_torch def test_prepare_batch(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(expected_src_tokens, result) # Test that special tokens are reset @require_torch def test_prepare_batch_empty_target_text(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, padding=True, 
return_tensors="pt") # check if input_ids are returned and no labels self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("labels", batch) self.assertNotIn("decoder_attention_mask", batch) @require_torch def test_tokenizer_as_target_length(self): tgt_text = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt") self.assertEqual(32, targets["input_ids"].shape[1]) @require_torch def test_prepare_batch_not_longer_than_maxlen(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer( ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 1024)) @require_torch def test_special_tokens(self): src_text = ["A long paragraph for summarization."] tgt_text = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: inputs = tokenizer(src_text, return_tensors="pt") targets = tokenizer(text_target=tgt_text, return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
transformers/tests/models/bart/test_tokenization_bart.py/0
{ "file_path": "transformers/tests/models/bart/test_tokenization_bart.py", "repo_id": "transformers", "token_count": 3668 }
378
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class BertGenerationEncoderTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.use_labels = use_labels self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return BertGenerationConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, token_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, 
input_ids, input_mask, token_labels, **kwargs, ): model = BertGenerationEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ): config.add_cross_attention = True model = BertGenerationEncoder(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ): config.is_decoder = True config.add_cross_attention = True model = BertGenerationDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, token_labels, *args, ): model = BertGenerationDecoder(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () 
all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def setUp(self): self.model_tester = BertGenerationEncoderTester(self) self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_bert(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() config.model_type = "bert" self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_model_from_pretrained(self): model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") self.assertIsNotNone(model) @require_torch class BertGenerationEncoderIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size([1, 8, 1024]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @require_torch class BertGenerationDecoderIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size([1, 8, 50358]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
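# A minimal, self-contained sketch of the encoder shape contract that
# `create_and_check_model` above exercises. The tiny config values and the helper
# name are illustrative assumptions (not taken from this suite); it relies on torch
# and the BertGeneration classes already imported at the top of this file.
def _sketch_bert_generation_encoder_shape():
    config = BertGenerationConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = BertGenerationEncoder(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 7))
    with torch.no_grad():
        last_hidden_state = model(input_ids).last_hidden_state
    # (batch_size, seq_length, hidden_size), matching the assertion in the tester
    assert last_hidden_state.shape == (2, 7, config.hidden_size)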
transformers/tests/models/bert_generation/test_modeling_bert_generation.py/0
{ "file_path": "transformers/tests/models/bert_generation/test_modeling_bert_generation.py", "repo_id": "transformers", "token_count": 5707 }
379
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import CLIPImageProcessor class CLIPImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class CLIPImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = CLIPImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = CLIPImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def 
test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
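# Illustrative usage sketch (hypothetical helper, not one of the assertions above):
# a CLIPImageProcessor configured like this tester's dict should resize the shortest
# edge to 20 and center-crop to 18x18, so one RGB image comes back as a
# (1, 3, 18, 18) batch. Only numpy and PIL are assumed here.
def _sketch_clip_image_processor_usage():
    import numpy as np
    from PIL import Image

    processor = CLIPImageProcessor(
        do_resize=True,
        size={"shortest_edge": 20},
        do_center_crop=True,
        crop_size={"height": 18, "width": 18},
        do_normalize=True,
        do_convert_rgb=True,
    )
    image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
    pixel_values = processor(images=image, return_tensors="np").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)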
transformers/tests/models/clip/test_image_processing_clip.py/0
{ "file_path": "transformers/tests/models/clip/test_image_processing_clip.py", "repo_id": "transformers", "token_count": 1915 }
380
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow Data2VecVision model. """ from __future__ import annotations import collections.abc import inspect import unittest import numpy as np from transformers import Data2VecVisionConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, ) from transformers.modeling_tf_utils import keras from transformers.models.data2vec.modeling_tf_data2vec_vision import ( TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class TFData2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, 
hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = TFData2VecVisionModel(config=config) result = model(pixel_values, training=False) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = ( self.image_size if isinstance(self.image_size, collections.abc.Iterable) else (self.image_size, self.image_size) ) patch_size = ( self.patch_size if isinstance(self.image_size, collections.abc.Iterable) else (self.patch_size, self.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = TFData2VecVisionForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFData2VecVisionForSemanticSegmentation(config) result = model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_for_keras_fit(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, _, _ = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))} return config, inputs_dict @require_tf class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (TFData2VecVisionModel, TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFData2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in Data2VecVision, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Data2VecVision has a different seq_length image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Overriding this method since the base method won't be compatible with Data2VecVision. @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` cannot operate with the default `fit()` method. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() label_names = {"labels"} self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = { key: val for key, val in prepared_for_class.items() if key not in label_names } self.assertGreater(len(inputs_minus_labels), 0) model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) # Overriding this method since the base method won't be compatible with Data2VecVision. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` won't have labels against which we # could compute loss. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] loss_size = tf.size(added_label) # Test that model correctly compute the loss with kwargs possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a dict _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() loss = model(**prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a tuple label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertEqual(loss.shape, [loss_size]) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFData2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFData2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = tf.convert_to_tensor([1, 1000]) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.convert_to_tensor([0.3277, -0.1395, 0.0911]) tf.debugging.assert_near(logits[0, :3], expected_slice, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(tf.nn.top_k(outputs.logits[0], 2).indices.numpy().tolist(), 
expected_top2)
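# A minimal sketch of the classification-head shape contract checked above, using a
# tiny randomly initialised config. The values mirror TFData2VecVisionModelTester's
# defaults and the helper name is illustrative only; TensorFlow is assumed available.
def _sketch_tf_data2vec_vision_logits_shape():
    config = Data2VecVisionConfig(
        image_size=30,
        patch_size=2,
        num_channels=3,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        out_indices=[0, 1, 2, 3],
        num_labels=10,
    )
    model = TFData2VecVisionForImageClassification(config)
    pixel_values = tf.random.uniform((1, 3, 30, 30))
    logits = model(pixel_values, training=False).logits
    # one score per label for each image in the batch
    assert logits.shape == (1, 10)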
transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py", "repo_id": "transformers", "token_count": 9984 }
381
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DeiT model. """ import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class DeiTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, 
hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = DeiTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = DeiTForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = DeiTForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = DeiTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = DeiTForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DeiTModelTester(self) self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # special case for DeiTForImageClassificationWithTeacher model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(MODEL_MAPPING) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass 
@unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def test_model_from_pretrained(self): for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DeiTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class DeiTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. 
""" model = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto" ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values)
transformers/tests/models/deit/test_modeling_deit.py/0
{ "file_path": "transformers/tests/models/deit/test_modeling_deit.py", "repo_id": "transformers", "token_count": 7228 }
382
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class FlaxDistilBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, ) return config, input_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, 
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
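# A minimal sketch of the forward contract with a small randomly initialised config;
# the values mirror FlaxDistilBertModelTester's defaults and the helper name is
# illustrative only (assumes flax/jax are installed).
def _sketch_flax_distilbert_forward():
    config = DistilBertConfig(vocab_size=99, dim=32, n_layers=2, n_heads=4, hidden_dim=37)
    model = FlaxDistilBertModel(config)
    input_ids = np.ones((1, 7), dtype="i4")
    last_hidden_state = model(input_ids)[0]
    # (batch_size, seq_length, dim)
    assert last_hidden_state.shape == (1, 7, 32)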
transformers/tests/models/distilbert/test_modeling_flax_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_flax_distilbert.py", "repo_id": "transformers", "token_count": 2492 }
383
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DPT model. """ import unittest from transformers import Dinov2Config, DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MODEL_MAPPING, DPTForDepthEstimation from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size # DPT's sequence length self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( backbone_config=self.get_backbone_config(), backbone=None, neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = 
DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (DPTForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = {"depth-estimation": DPTForDepthEstimation} if is_torch_available() else {} test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") def 
test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="DPT with AutoBackbone does not have a base model") def test_save_load_fast_init_to_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPTForDepthEstimation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation_dinov2(self): image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti") model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 576, 736)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[6.0433, 7.1636, 7.4268], [6.9047, 7.2471, 7.2355], [7.9261, 8.0631, 8.0244]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_depth_estimation_beit(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-base-384").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[2669.7061, 2663.7144, 2674.9399], [2633.9326, 2650.9092, 2665.4270], [2621.8271, 2632.0129, 2637.2290]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_depth_estimation_swinv2(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 256, 256)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[1032.7719, 1025.1886, 1030.2661], [1023.7619, 1021.0075, 1024.9121], [1022.5667, 1018.8522, 1021.4145]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
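# A minimal sketch of the depth-estimation shape contract exercised by DPTModelTester:
# a DPT head on a tiny random Dinov2 backbone predicts one depth map per image at the
# input resolution. All sizes mirror the tester above; the helper name is illustrative.
def _sketch_dpt_dinov2_depth_shape():
    backbone_config = Dinov2Config(
        image_size=32,
        patch_size=16,
        num_channels=3,
        hidden_size=4,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=8,
        out_features=["stage1", "stage2"],
        apply_layernorm=False,
        reshape_hidden_states=False,
    )
    config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=[2, 2], fusion_hidden_size=6)
    model = DPTForDepthEstimation(config).eval()
    pixel_values = torch.rand(1, 3, 32, 32)
    with torch.no_grad():
        predicted_depth = model(pixel_values).predicted_depth
    assert predicted_depth.shape == (1, 32, 32)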
transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py/0
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt_auto_backbone.py", "repo_id": "transformers", "token_count": 5415 }
384
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Encodec model. """ import copy import inspect import os import tempfile import unittest from typing import Dict, List, Tuple import numpy as np from datasets import Audio, load_dataset from transformers import AutoProcessor, EncodecConfig from transformers.testing_utils import ( is_torch_available, require_torch, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EncodecModel def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} decoder_dict = {"decoder_input_ids": decoder_input_ids} if decoder_input_ids is not None else {} return {**encoder_dict, **decoder_dict} @require_torch class EncodecModelTester: def __init__( self, parent, # `batch_size` needs to be an even number if the model has some outputs with batch dim != 0. 
batch_size=12, num_channels=2, is_training=False, intermediate_size=40, hidden_size=32, num_filters=8, num_residual_layers=1, upsampling_ratios=[8, 4], num_lstm_layers=1, codebook_size=64, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.intermediate_size = intermediate_size self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.num_lstm_layers = num_lstm_layers self.codebook_size = codebook_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0) config = self.get_config() inputs_dict = {"input_values": input_values} return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return EncodecConfig( audio_channels=self.num_channels, chunk_in_sec=None, hidden_size=self.hidden_size, num_filters=self.num_filters, num_residual_layers=self.num_residual_layers, upsampling_ratios=self.upsampling_ratios, num_lstm_layers=self.num_lstm_layers, codebook_size=self.codebook_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = EncodecModel(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] result = model(input_values) self.parent.assertEqual( result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size) ) @require_torch class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (EncodecModel,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False pipeline_model_mapping = {"feature-extraction": EncodecModel} if is_torch_available() else {} input_name = "input_values" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # model does not have attention and does not support returning hidden states inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if "output_attentions" in inputs_dict: inputs_dict.pop("output_attentions") if "output_hidden_states" in inputs_dict: inputs_dict.pop("output_hidden_states") return inputs_dict def setUp(self): self.model_tester = EncodecModelTester(self) self.config_tester = ConfigTester( self, config_class=EncodecConfig, hidden_size=37, common_properties=[], has_text_modality=False ) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values", "padding_mask", "bandwidth"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics") def test_inputs_embeds(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics") def 
test_model_common_attributes(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic") def test_torchscript_output_attentions(self): pass @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic") def test_torchscript_output_hidden_state(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic") def test_attention_outputs(self): pass def test_feed_forward_chunking(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: torch.manual_seed(0) config = copy.deepcopy(original_config) config.chunk_length_s = None config.overlap = None config.sampling_rate = 10 model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) inputs["input_values"] = inputs["input_values"].repeat(1, 1, 10) hidden_states_no_chunk = model(**inputs)[0] torch.manual_seed(0) config.chunk_length_s = 1 config.overlap = 0 config.sampling_rate = 10 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**inputs)[0] self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3)) @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic") def test_hidden_states_output(self): pass def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): # outputs are not tensors but list (since each sequence don't have the same frame_length) out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv"] ignore_init = ["lstm"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif not any(x in name for x in ignore_init): self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_identity_shortcut(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_conv_shortcut = False self.model_tester.create_and_check_model_forward(config, inputs_dict) def normalize(arr): norm = np.linalg.norm(arr) normalized_arr = arr / norm return normalized_arr def compute_rmse(arr1, arr2): arr1_normalized = normalize(arr1) arr2_normalized = normalize(arr2) return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean()) @slow @require_torch class EncodecIntegrationTest(unittest.TestCase): def test_integration_24kHz(self): expected_rmse = { "1.5": 0.0025, "24.0": 0.0015, } expected_codesums = { "1.5": [371955], "24.0": [6659962], } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "facebook/encodec_24khz" model = EncodecModel.from_pretrained(model_id).to(torch_device) processor = AutoProcessor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) for bandwidth, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode(inputs["input_values"], bandwidth=float(bandwidth)) audio_code_sums = [a[0].sum().cpu().item() for a in encoder_outputs[0]] # make sure audio encoded codes are correct self.assertListEqual(audio_code_sums, expected_codesums[bandwidth]) audio_codes, scales = encoder_outputs.to_tuple() input_values_dec = model.decode(audio_codes, scales, inputs["padding_mask"])[0] input_values_enc_dec = model( inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth) )[-1] # make sure forward and decode gives same result self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3)) # make sure shape matches self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape) arr = inputs["input_values"][0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(rmse < expected_rmse) def test_integration_48kHz(self): expected_rmse = { 
"3.0": 0.001, "24.0": 0.0005, } expected_codesums = { "3.0": [144259, 146765, 156435, 176871, 161971], "24.0": [1568553, 1294948, 1306190, 1464747, 1663150], } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "facebook/encodec_48khz" model = EncodecModel.from_pretrained(model_id).to(torch_device) model = model.eval() processor = AutoProcessor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] # transform mono to stereo audio_sample = np.array([audio_sample, audio_sample]) inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt").to( torch_device ) for bandwidth, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode( inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth), return_dict=False ) audio_code_sums = [a[0].sum().cpu().item() for a in encoder_outputs[0]] # make sure audio encoded codes are correct self.assertListEqual(audio_code_sums, expected_codesums[bandwidth]) audio_codes, scales = encoder_outputs input_values_dec = model.decode(audio_codes, scales, inputs["padding_mask"])[0] input_values_enc_dec = model( inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth) )[-1] # make sure forward and decode gives same result self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3)) # make sure shape matches self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape) arr = inputs["input_values"][0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(rmse < expected_rmse) def test_batch_48kHz(self): expected_rmse = { "3.0": 0.001, "24.0": 0.0005, } expected_codesums = { "3.0": [ [72410, 79137, 76694, 90854, 73023, 82980, 72707, 54842], [85561, 81870, 76953, 48967, 79315, 85442, 81479, 107241], ], "24.0": [ [72410, 79137, 76694, 90854, 73023, 82980, 72707, 54842], [85561, 81870, 76953, 48967, 79315, 85442, 81479, 107241], ], } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "facebook/encodec_48khz" model = EncodecModel.from_pretrained(model_id).to(torch_device) processor = AutoProcessor.from_pretrained(model_id, chunk_length_s=1, overlap=0.01) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_samples = [ np.array([audio_sample["array"], audio_sample["array"]]) for audio_sample in librispeech_dummy[-2:]["audio"] ] inputs = processor(raw_audio=audio_samples, sampling_rate=processor.sampling_rate, return_tensors="pt") input_values = inputs["input_values"].to(torch_device) for bandwidth, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode(input_values, bandwidth=float(bandwidth), return_dict=False) audio_code_sums_0 = [a[0][0].sum().cpu().item() for a in encoder_outputs[0]] audio_code_sums_1 = [a[0][1].sum().cpu().item() for a in encoder_outputs[0]] # make sure audio encoded codes are correct self.assertListEqual(audio_code_sums_0, expected_codesums[bandwidth][0]) 
self.assertListEqual(audio_code_sums_1, expected_codesums[bandwidth][1]) audio_codes, scales = encoder_outputs input_values_dec = model.decode(audio_codes, scales)[0] input_values_enc_dec = model(input_values, bandwidth=float(bandwidth))[-1] # make sure forward and decode gives same result self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3)) # make sure shape matches self.assertTrue(input_values.shape == input_values_enc_dec.shape) arr = input_values[0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(rmse < expected_rmse)
transformers/tests/models/encodec/test_modeling_encodec.py/0
{ "file_path": "transformers/tests/models/encodec/test_modeling_encodec.py", "repo_id": "transformers", "token_count": 11774 }
385
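The Encodec integration tests above score reconstructions with the normalize/compute_rmse helpers defined in the same file and compare the result against per-bandwidth expected_rmse thresholds. The short NumPy sketch below reuses those helpers on synthetic arrays to show what the metric measures; the array length and noise level are arbitrary assumptions, not values taken from the tests.

import numpy as np


def normalize(arr):
    return arr / np.linalg.norm(arr)


def compute_rmse(arr1, arr2):
    arr1_normalized = normalize(arr1)
    arr2_normalized = normalize(arr2)
    return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean())


rng = np.random.default_rng(0)
reference = rng.standard_normal(24_000)  # stand-in for roughly 1 s of audio at 24 kHz
near_copy = reference + 0.01 * rng.standard_normal(24_000)  # small codec-style error
unrelated = rng.standard_normal(24_000)

print(compute_rmse(reference, near_copy))  # small: the two signals are nearly identical
print(compute_rmse(reference, unrelated))  # much larger, so a broken codec would fail the threshold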
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Falcon model. """ import tempfile import unittest from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoTokenizer, FalconConfig, is_torch_available, set_seed, ) from transformers.testing_utils import require_bitsandbytes, require_torch, require_torch_sdpa, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class FalconModelTester: def __init__( self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return FalconConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = FalconModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FalconModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = 
output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": FalconModel, "question-answering": FalconForQuestionAnswering, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_position_embedding_types(self): config, *inputs = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: config.alibi = alibi self.model_tester.create_and_check_model(config, *inputs) def test_falcon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def 
test_falcon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_past_key_values_format(self): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): return model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) batch_size, seq_length = inputs["input_ids"].shape for i in range(num_hidden_layers): if config.new_decoder_architecture: num_attention_heads = config.num_attention_heads elif config.multi_query: num_attention_heads = 1 self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = FalconModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = FalconModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): max_new_tokens = 30 if len(self.all_generative_model_classes) == 0: self.skipTest(f"{self.__class__.__name__} tests a model that does support generate: skipping this test") for model_class in self.all_generative_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") # NOTE: This check is disabled for Falcon as the non-SDPA/SDPA implementation is in the same class (legacy reason). # for name, submodule in model_eager.named_modules(): # if "SdpaAttention" in submodule.__class__.__name__: # raise ValueError("The eager model should not have SDPA attention layers") # has_sdpa = False # for name, submodule in model_sdpa.named_modules(): # if "SdpaAttention" in submodule.__class__.__name__: # has_sdpa = True # break # if not has_sdpa: # raise ValueError("The SDPA model should have SDPA attention layers") # Just test that a large cache works as expected res_eager = model_eager.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) res_sdpa = model_sdpa.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) self.assertTrue(torch.allclose(res_eager, res_sdpa)) @require_torch class FalconLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_falcon(self): tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) EXPECTED_OUTPUT = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." 
) output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow def test_lm_generation_big_models(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**inputs, do_sample=False, max_new_tokens=4) model.generate(**inputs, do_sample=True, max_new_tokens=4) model.generate(**inputs, num_beams=2, max_new_tokens=4) @slow def test_lm_generation_use_cache(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(device=torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # Test results are the same with and without cache outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False) outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0) @require_bitsandbytes @slow def test_batched_generation(self): tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "tiiuae/falcon-7b", device_map="auto", load_in_4bit=True, ) test_text = "A sequence: 1, 2" # should generate the rest of the sequence unpadded_inputs = tokenizer([test_text], return_tensors="pt").to("cuda:0") unpadded_gen_out = model.generate(**unpadded_inputs, max_new_tokens=20) unpadded_gen_text = tokenizer.batch_decode(unpadded_gen_out, skip_special_tokens=True) dummy_text = "This is a longer text " * 2 # forces left-padding on `test_text` padded_inputs = tokenizer([test_text, dummy_text], return_tensors="pt", padding=True).to("cuda:0") padded_gen_out = model.generate(**padded_inputs, max_new_tokens=20) padded_gen_text = tokenizer.batch_decode(padded_gen_out, skip_special_tokens=True) expected_output = "A sequence: 1, 2, 3, 4, 5, 6, 7, 8, " self.assertLess(unpadded_inputs.input_ids.shape[-1], padded_inputs.input_ids.shape[-1]) # left-padding exists self.assertEqual(unpadded_gen_text[0], expected_output) self.assertEqual(padded_gen_text[0], expected_output)
transformers/tests/models/falcon/test_modeling_falcon.py/0
{ "file_path": "transformers/tests/models/falcon/test_modeling_falcon.py", "repo_id": "transformers", "token_count": 11320 }
386
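test_model_rope_scaling above verifies RoPE scaling by building two FalconModel instances from the same seed, with and without config.rope_scaling, and comparing hidden states. The sketch below condenses that comparison using small sizes borrowed from FalconModelTester; treat it as an illustration of the test's logic rather than an exact reproduction (only the "linear" scaling branch is shown).

import torch
from transformers import FalconConfig, FalconModel, set_seed

config = FalconConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    new_decoder_architecture=True,
)
short_input = torch.randint(0, config.vocab_size, (1, 10))

set_seed(42)  # fixed seed so both models start from identical random weights
original_model = FalconModel(config).eval()
with torch.no_grad():
    original_output = original_model(short_input).last_hidden_state

set_seed(42)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # same dict the test assigns
scaled_model = FalconModel(config).eval()
with torch.no_grad():
    scaled_output = scaled_model(short_input).last_hidden_state

# Linear scaling alters the rotary embeddings even for short inputs, so this is
# expected to print False; dynamic scaling would only diverge on inputs longer
# than max_position_embeddings, which is what the test asserts.
print(torch.allclose(original_output, scaled_output, atol=1e-5))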
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch GLPN model. """ import unittest from transformers import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MODEL_MAPPING, GLPNConfig, GLPNForDepthEstimation, GLPNModel from transformers.models.glpn.modeling_glpn import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class GLPNConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class GLPNModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, decoder_hidden_size=16, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.decoder_hidden_size = decoder_hidden_size self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return GLPNConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
initializer_range=self.initializer_range, decoder_hidden_size=self.decoder_hidden_size, ) def create_and_check_model(self, config, pixel_values, labels): model = GLPNModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = GLPNForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GLPNModel, GLPNForDepthEstimation) if is_torch_available() else () pipeline_model_mapping = ( {"depth-estimation": GLPNForDepthEstimation, "feature-extraction": GLPNModel} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = GLPNModelTester(self) self.config_tester = GLPNConfigTester(self, config_class=GLPNConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @unittest.skip("GLPN does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("GLPN does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) # verify the last attentions (last block, last layer) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue # TODO: remove the following 3 lines once we have a MODEL_FOR_DEPTH_ESTIMATION_MAPPING # this can then be incorporated into _prepare_for_class in test_modeling_common.py if model_class.__name__ == "GLPNForDepthEstimation": batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GLPNModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch 
@require_vision @slow class GLPNModelIntegrationTest(unittest.TestCase): @slow def test_inference_depth_estimation(self): image_processor = GLPNImageProcessor.from_pretrained(GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0]) model = GLPNForDepthEstimation.from_pretrained(GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the predicted depth expected_shape = torch.Size([1, 480, 640]) self.assertEqual(outputs.predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/glpn/test_modeling_glpn.py/0
{ "file_path": "transformers/tests/models/glpn/test_modeling_glpn.py", "repo_id": "transformers", "token_count": 6135 }
387
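The GLPN record above registers GLPNForDepthEstimation under the "depth-estimation" pipeline task and runs an integration test on the COCO fixture image. As a hedged sketch, the same checkpoint list and image can be fed through the pipeline API; the structure of the returned result is not asserted here, so the code inspects it instead of assuming specific keys.

from transformers import pipeline
from transformers.models.glpn.modeling_glpn import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST

# Same checkpoint the integration test loads (first entry of the archive list).
depth_estimator = pipeline("depth-estimation", model=GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0])

# Same fixture image as prepare_img(); image pipelines also accept URLs or PIL images.
result = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(sorted(result))  # inspect the returned keys rather than assuming them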
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import ( VOCAB_FILES_NAMES, GPTNeoXJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class GPTNeoXJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = GPTNeoXJapaneseTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False} def setUp(self): super().setUp() vocab_tokens = [ "こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|startoftext|>", "<|endoftext|>", ] emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(self.emoji_file, "w") as emoji_writer: emoji_writer.write(json.dumps(emoji_tokens)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPTNeoXJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀" output_text = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.get_tokenizer() # Testing tokenization input_text = "こんにちは、世界。 こんばんは、㔺界。" expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, expected_token) # Testing conversion to ids without special tokens expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] input_ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(input_ids, expected_ids) # Testing conversion to ids with special tokens input_tokens = tokens + [tokenizer.unk_token] expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] input_ids = tokenizer.convert_tokens_to_ids(input_tokens) self.assertListEqual(input_ids, expected_ids) @slow def test_sequence_builders(self): tokenizer = 
self.tokenizer_class.from_pretrained("abeja/gpt-neox-japanese-2.7b") ids_1 = tokenizer.encode("ありがとう。", add_special_tokens=False) ids_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1) encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2) assert encoded_sentence == ids_1 assert encoded_pair == ids_1 + ids_2 def test_conversion_reversible(self): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def test_padding_different_model_input_name(self): # tokenizer has no padding token pass
transformers/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py/0
{ "file_path": "transformers/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py", "repo_id": "transformers", "token_count": 2273 }
388
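GPTNeoXJapaneseTokenizationTest above builds its tokenizer from a hand-written toy vocabulary and emoji table instead of a hub checkpoint. The sketch below replays that setup outside the test harness; the temporary directory is an assumption standing in for the mixin's tmpdirname, and the expected tokens in the final comment are copied from test_full_tokenizer.

import json
import os
import tempfile

from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import (
    VOCAB_FILES_NAMES,
    GPTNeoXJapaneseTokenizer,
)

vocab_tokens = [
    "こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>",
    "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>",
    "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|startoftext|>", "<|endoftext|>",
]
emoji_tokens = {"emoji": {"\U0001F600": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\U0001F600"}}

with tempfile.TemporaryDirectory() as tmpdir:
    with open(os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"]), "w", encoding="utf-8") as f:
        f.write("".join(token + "\n" for token in vocab_tokens))
    with open(os.path.join(tmpdir, VOCAB_FILES_NAMES["emoji_file"]), "w", encoding="utf-8") as f:
        f.write(json.dumps(emoji_tokens))

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(tmpdir, unk_token="<unk>")
    print(tokenizer.tokenize("こんにちは、世界。 こんばんは、㔺界。"))
    # expected: ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]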
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Allegro.pl and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import HerbertTokenizer, HerbertTokenizerFast from transformers.models.herbert.tokenization_herbert import VOCAB_FILES_NAMES from transformers.testing_utils import get_tests_dir, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class HerbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = HerbertTokenizer rust_tokenizer_class = HerbertTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # Use a simpler test file without japanese/chinese characters with open(f"{get_tests_dir()}/fixtures/sample_text_no_unicode.txt", encoding="utf-8") as f_data: self._data = f_data.read().replace("\n\n", "\n").strip() vocab = [ "<s>", "</s>", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", ",</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(vocab_file=self.vocab_file, merges_file=self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [16, 17, 23] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "lower,newer" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("allegro/herbert-base-cased") text = tokenizer.encode("konstruowanie sekwencji", add_special_tokens=False) text_2 = tokenizer.encode("konstruowanie wielu sekwencji", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = 
tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [2] assert encoded_pair == [0] + text + [2] + text_2 + [2] @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer_with_special_tokens_change(self): pass @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer(self): pass
transformers/tests/models/herbert/test_tokenization_herbert.py/0
{ "file_path": "transformers/tests/models/herbert/test_tokenization_herbert.py", "repo_id": "transformers", "token_count": 2164 }
389
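A quick illustration of the special-token layout asserted in test_sequence_builders above; this is a hedged usage sketch rather than part of the test file, and it assumes the allegro/herbert-base-cased checkpoint referenced by the slow test can be downloaded.

from transformers import HerbertTokenizer

tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")
ids = tokenizer.encode("konstruowanie sekwencji", add_special_tokens=False)
# A single sequence is wrapped as <s> ... </s>, which map to ids 0 and 2 in this vocabulary.
assert tokenizer.build_inputs_with_special_tokens(ids) == [0] + ids + [2]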
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch InstructBLIP model. """ import inspect import tempfile import unittest import numpy as np import requests from transformers import ( CONFIG_MAPPING, InstructBlipConfig, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from transformers.testing_utils import ( require_accelerate, require_bitsandbytes, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from torch import nn from transformers import InstructBlipForConditionalGeneration, InstructBlipVisionModel from transformers.models.instructblip.modeling_instructblip import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class InstructBlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in case of a vision transformer, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return InstructBlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = InstructBlipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) 
patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class InstructBlipVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as InstructBLIP's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (InstructBlipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = InstructBlipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=InstructBlipVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="InstructBLIP's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training(self): pass @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = InstructBlipVisionModel.from_pretrained(model_name) 
self.assertIsNotNone(model) class InstructBlipQFormerModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask def get_config(self): return InstructBlipQFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) # this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py class InstructBlipTextModelDecoderOnlyTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels 
self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_ids[:, -1] = self.eos_token_id # Eos Token attention_mask = input_ids.ne(self.pad_token_id) return config, input_ids, attention_mask def get_config(self): return CONFIG_MAPPING["opt"]( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) # this model tester uses a decoder-only language model (OPT) class InstructBlipForConditionalGenerationDecoderOnlyModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10 ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = InstructBlipVisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = InstructBlipQFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = InstructBlipTextModelDecoderOnlyTester(parent, **text_kwargs) self.is_training = is_training self.num_query_tokens = num_query_tokens def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() _, _, _, qformer_input_ids, qformer_attention_mask = self.qformer_model_tester.prepare_config_and_inputs() _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values def get_config(self): return InstructBlipConfig.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, ) def create_and_check_for_conditional_generation( self, config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values ): model = InstructBlipForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values, input_ids=input_ids, attention_mask=attention_mask, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, ) expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length self.parent.assertEqual( result.logits.shape, (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "qformer_input_ids": qformer_input_ids, "qformer_attention_mask": qformer_attention_mask, "labels": input_ids, } return config, inputs_dict @require_torch class 
InstructBlipForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (InstructBlipForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = InstructBlipForConditionalGenerationDecoderOnlyModelTester(self) def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="InstructBlipForConditionalGeneration doesn't support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Tied weights are tested in individual model tests") def test_tied_weights_keys(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="InstructBlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="There's no base InstructBlipModel") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="There's no base InstructBlipModel") def test_save_load_fast_init_to_base(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save InstructBlipConfig and check if we can load InstructBlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = InstructBlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save InstructBlipConfig and check if we can load InstructBlipQFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = InstructBlipQFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST: model = InstructBlipForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch @slow class InstructBlipModelIntegrationTest(unittest.TestCase): @require_bitsandbytes @require_accelerate def test_inference_vicuna_7b(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-vicuna-7b", load_in_8bit=True, low_cpu_mem_usage=True ) url = 
"https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") prompt = "What is unusual about this image?" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, torch.float16) # verify logits with torch.no_grad(): logits = model(**inputs).logits expected_slice = torch.tensor( [[-3.4902, -12.5078, 8.4141], [-5.1211, -12.1328, 7.8281], [-4.0312, -13.5938, 9.1172]], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3].float(), expected_slice, atol=1e-3)) # verify generation outputs = model.generate(**inputs, max_new_tokens=30) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() expected_outputs = [2, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 1623, 263, 19587, 4272, 11952, 29889] # fmt: skip self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, "The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV while driving down a busy city street.", ) def test_inference_flant5_xl(self): processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl") model = InstructBlipForConditionalGeneration.from_pretrained( "Salesforce/instructblip-flan-t5-xl", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, ).to(torch_device) url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") prompt = "What is unusual about this image?" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device) for k, v in inputs.items(): if torch.is_floating_point(v): inputs[k] = v.to(torch.bfloat16) outputs = model.generate( **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, ) generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0] expected_outputs = [0, 37, 1023, 9850, 7, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4459, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 5119, 3, 9, 4459, 8677, 28, 3, 9, 2756, 4459, 6177, 6, 11, 3, 88, 19, 338, 46, 3575, 53, 1476, 12, 743, 112, 2491, 5, 37, 1023, 19, 7225, 788, 12, 8, 685, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 94, 19, 487, 24, 8, 388, 19, 1119, 12, 1097, 540, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 6, 68, 34, 19, 92, 487, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 3, 13865, 13, 8, 1053, 21, 8, 388, 31, 7, 2874, 6, 34, 19, 964, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 1] # fmt: skip self.assertEqual(outputs[0].tolist(), expected_outputs) self.assertEqual( generated_text, "The image depicts a man ironing clothes on the back of a yellow van in the middle of a busy city street. The man is wearing a yellow shirt with a bright yellow tie, and he is using an ironing board to complete his task. The image is unusual due to the fact that it shows a man ironing clothes on the back of a van in the middle of a busy city street. 
It is possible that the man is trying to save money by doing his laundry on the back of the van, but it is also possible that he is trying to save time by doing his laundry on the back of the van in the middle of a busy city street. Regardless of the reason for the man's actions, it is clear that he is trying to save time by doing his laundry on the back of the van in the middle of a busy city street.", )
transformers/tests/models/instructblip/test_modeling_instructblip.py/0
{ "file_path": "transformers/tests/models/instructblip/test_modeling_instructblip.py", "repo_id": "transformers", "token_count": 10951 }
390
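For orientation, the generation path exercised by the InstructBLIP integration tests above reduces to the hedged sketch below; it mirrors the flan-t5-xl test but leaves out the bfloat16 casting, device placement, and beam-search arguments, so it illustrates the API rather than reproducing the asserted outputs.

import requests
import torch
from PIL import Image
from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-flan-t5-xl")

url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")

with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())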
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch LeViT model. """ import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class LevitConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class LevitModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[16, 32, 48], num_attention_heads=[1, 2, 3], depths=[2, 3, 4], key_dim=[8, 8, 8], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2, # Check ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.hidden_sizes = hidden_sizes self.num_attention_heads = num_attention_heads self.depths = depths self.key_dim = key_dim self.drop_path_rate = drop_path_rate self.patch_size = patch_size self.attention_ratio = attention_ratio self.mlp_ratio = mlp_ratio self.initializer_range = initializer_range self.down_ops = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.initializer_range = initializer_range def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return LevitConfig( image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, 
num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) def create_and_check_model(self, config, pixel_values, labels): model = LevitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) image_size = (self.image_size, self.image_size) height, width = image_size[0], image_size[1] for _ in range(4): height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = LevitForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Levit does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = LevitModelTester(self) self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="Levit does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Levit does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="Levit does not output attentions") def test_attention_outputs(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depths) + 1 self.assertEqual(len(hidden_states), expected_num_layers) image_size = (self.model_tester.image_size, self.model_tester.image_size) height, width = 
image_size[0], image_size[1] for _ in range(4): height = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) width = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]), [ height * width, self.model_tester.hidden_sizes[0], ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # special case for LevitForImageClassificationWithTeacher model def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(MODEL_MAPPING) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing 
{model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning from PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom that something is wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def test_model_from_pretrained(self): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LevitModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class LevitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def test_inference_image_classification_head(self): model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/levit/test_modeling_levit.py/0
{ "file_path": "transformers/tests/models/levit/test_modeling_levit.py", "repo_id": "transformers", "token_count": 7343 }
391
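The shape bookkeeping used by LevitModelTester above (both in create_and_check_model and in the hidden-states test) condenses into the small helper below; this is only a sketch that mirrors the tester's arithmetic, with the division by four standing in for the model's two Subsample stages.

from math import ceil, floor

def levit_expected_seq_length(image_size=64, kernel_size=3, stride=2, padding=1):
    # Four stride-2 convolutions in the patch embedding shrink the spatial resolution...
    height = width = image_size
    for _ in range(4):
        height = floor((height + 2 * padding - kernel_size) / stride + 1)
        width = floor((width + 2 * padding - kernel_size) / stride + 1)
    # ...and the two Subsample stages reduce each side by a further factor of four.
    return ceil(height / 4) * ceil(width / 4)

print(levit_expected_seq_length())  # 1 for the 64x64 configuration used in the tester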
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_bs4 from transformers.utils import is_bs4_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bs4_available(): from transformers import MarkupLMFeatureExtractor class MarkupLMFeatureExtractionTester(unittest.TestCase): def __init__(self, parent): self.parent = parent def prepare_feat_extract_dict(self): return {} def get_html_strings(): html_string_1 = """<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>""" html_string_2 = """ <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> """ return [html_string_1, html_string_2] @require_bs4 class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None def setUp(self): self.feature_extract_tester = MarkupLMFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_call(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class() # Test not batched input html_string = get_html_strings()[0] encoding = feature_extractor(html_string) # fmt: off expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']] expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']] # fmt: on self.assertEqual(encoding.nodes, expected_nodes) self.assertEqual(encoding.xpaths, expected_xpaths) # Test batched html_strings = get_html_strings() encoding = feature_extractor(html_strings) # fmt: off expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']] expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']] self.assertEqual(len(encoding.nodes), 2) self.assertEqual(len(encoding.xpaths), 2) self.assertEqual(encoding.nodes, expected_nodes) self.assertEqual(encoding.xpaths, expected_xpaths)
transformers/tests/models/markuplm/test_feature_extraction_markuplm.py/0
{ "file_path": "transformers/tests/models/markuplm/test_feature_extraction_markuplm.py", "repo_id": "transformers", "token_count": 1485 }
392
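To make the expected behaviour above concrete, here is a hedged sketch of the call pattern test_call exercises; MarkupLMFeatureExtractor needs bs4 installed and turns raw HTML into text nodes plus their xpaths. The HTML string and the expected values are taken directly from the test.

from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
encoding = feature_extractor(html)
print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]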
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Mixtral model. """ import tempfile import unittest import pytest from transformers import MixtralConfig, is_torch_available from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MixtralForCausalLM, MixtralForSequenceClassification, MixtralModel class MixtralModelTester: # Copied from tests.models.mistral.test_modeling_mistral.MistralModelTester.__init__ def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope # Copied from tests.models.mistral.test_modeling_mistral.MistralModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], 
self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return MixtralConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, num_experts_per_tok=2, num_local_experts=2, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Mixtral def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MixtralModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Mixtral def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = MixtralModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Mixtral def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = MixtralForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Mixtral def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = MixtralForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = 
ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common with Llama->Mixtral def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch # Copied from tests.models.mistral.test_modeling_mistral.MistralModelTest with Mistral->Mixtral class MixtralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (MixtralModel, MixtralForCausalLM, MixtralForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = (MixtralForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": MixtralModel, "text-classification": MixtralForSequenceClassification, "text-generation": MixtralForCausalLM, "zero-shot": MixtralForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = MixtralModelTester(self) self.config_tester = ConfigTester(self, config_class=MixtralConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_Mixtral_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() print(config) config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = MixtralForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Mixtral_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = MixtralForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Mixtral_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = MixtralForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip("Mixtral buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip("Mixtral uses GQA on all models so the KV cache is a non standard format") def test_past_key_values_format(self): pass @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): import torch for model_class in self.all_generative_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device) model.generate(dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) with self.assertRaises(ValueError): _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): import torch max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Mixtral apparently does not support right padding + use_cache with FA2. dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): self.skipTest("Mixtral flash attention does not support right padding") # Ignore copy def test_load_balancing_loss(self): r""" Let's make sure we can actually compute the loss and do a backward on it. 
""" config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.num_local_experts = 8 config.output_router_logits = True input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MixtralForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask) self.assertEqual(result.router_logits[0].shape, (91, config.num_local_experts)) torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2) # First, we make sure that adding padding tokens doesn't change the loss # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding) pad_length = 1000 # Add padding tokens (assume that pad_token_id=1) to input_ids padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device) padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left padded_attention_mask = padded_input_ids.ne(1).to(torch_device) padded_result = model(padded_input_ids, attention_mask=padded_attention_mask) torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4) # We make sure that the loss of includding padding tokens != the loss without padding tokens # if attention_mask=None --> we don't exclude padding tokens include_padding_result = model(padded_input_ids, attention_mask=None) # This is to mimic torch.testing.assert_not_close self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item()) @require_torch class MixtralIntegrationTest(unittest.TestCase): @slow @require_torch_gpu def test_small_model_logits(self): model_id = "hf-internal-testing/Mixtral-tiny" dummy_input = torch.LongTensor([[0, 1, 0], [0, 1, 0]]).to(torch_device) model = MixtralForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True).to( torch_device ) # TODO: might need to tweak it in case the logits do not match on our daily runners # these logits have been obtained with the original megablocks impelmentation. 
EXPECTED_LOGITS = torch.Tensor( [[0.1670, 0.1620, 0.6094], [-0.8906, -0.1588, -0.6060], [0.1572, 0.1290, 0.7246]] ).to(torch_device) with torch.no_grad(): logits = model(dummy_input).logits torch.testing.assert_close(logits[0, :3, :3].half(), EXPECTED_LOGITS, atol=1e-3, rtol=1e-3) torch.testing.assert_close(logits[1, :3, :3].half(), EXPECTED_LOGITS, atol=1e-3, rtol=1e-3) @slow # @require_torch_gpu def test_small_model_logits_batched(self): model_id = "hf-internal-testing/Mixtral-tiny" dummy_input = torch.LongTensor([[0, 0, 0, 0, 0, 0, 1, 2, 3], [1, 1, 2, 3, 4, 5, 6, 7, 8]]).to(torch_device) attention_mask = dummy_input.ne(0).to(torch.long) model = MixtralForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True).to( torch_device ) # TODO: might need to tweak it in case the logits do not match on our daily runners EXPECTED_LOGITS_LEFT = torch.Tensor( [[0.1750, 0.0537, 0.7007], [0.1750, 0.0537, 0.7007], [0.1750, 0.0537, 0.7007]], ) # logits[0, -3:, -3:].half() EXPECTED_LOGITS_LEFT_UNPADDED = torch.Tensor( [[0.2212, 0.5200, -0.3816], [0.8213, -0.2313, 0.6069], [0.2664, -0.7090, 0.2468]], ) # logits[1, -3:, -3:].half() EXPECTED_LOGITS_RIGHT_UNPADDED = torch.Tensor( [[0.2205, 0.1232, -0.1611], [-0.3484, 0.3030, -1.0312], [0.0742, 0.7930, 0.7969]] ) with torch.no_grad(): logits = model(dummy_input, attention_mask=attention_mask).logits torch.testing.assert_close(logits[0, :3, :3].half(), EXPECTED_LOGITS_LEFT, atol=1e-3, rtol=1e-3) torch.testing.assert_close(logits[0, -3:, -3:].half(), EXPECTED_LOGITS_LEFT_UNPADDED, atol=1e-3, rtol=1e-3) torch.testing.assert_close(logits[1, -3:, -3:].half(), EXPECTED_LOGITS_RIGHT_UNPADDED, atol=1e-3, rtol=1e-3)
transformers/tests/models/mixtral/test_modeling_mixtral.py/0
{ "file_path": "transformers/tests/models/mixtral/test_modeling_mixtral.py", "repo_id": "transformers", "token_count": 10571 }
393
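The Mixtral record above asserts that the auxiliary router loss comes out at roughly 2 and does not change when the batch is left-padded, as long as an attention mask is passed. As a rough illustration of why, the sketch below implements a simplified Switch-style load-balancing loss over router logits. It is a hypothetical stand-in, not the transformers implementation, and the top_k=2 default is an assumption matching Mixtral's two-experts-per-token routing.

import torch
import torch.nn.functional as F


def load_balancing_loss(router_logits, num_experts, top_k=2, attention_mask=None):
    # router_logits: (num_tokens, num_experts), e.g. (batch_size * seq_len, num_local_experts)
    probs = F.softmax(router_logits, dim=-1)
    _, selected_experts = torch.topk(probs, top_k, dim=-1)
    expert_mask = F.one_hot(selected_experts, num_experts).float()  # (tokens, top_k, num_experts)

    if attention_mask is not None:
        # drop padded positions so that left-padding cannot change the loss
        keep = attention_mask.reshape(-1).bool()
        probs, expert_mask = probs[keep], expert_mask[keep]

    tokens_per_expert = expert_mask.mean(dim=0)        # fraction of tokens routed to each expert, per top-k slot
    router_prob_per_expert = probs.mean(dim=0)         # mean router probability per expert
    return num_experts * torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))


# For near-uniform routing the value lands close to top_k, which is why the test expects ~2:
logits = torch.randn(2 * 91, 8)  # same token / expert counts as the test above
print(load_balancing_loss(logits, num_experts=8, top_k=2))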
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow MobileViT model. """ from __future__ import annotations import inspect import unittest from transformers import MobileViTConfig from transformers.file_utils import is_tf_available, is_vision_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel from transformers.models.mobilevit.modeling_tf_mobilevit import TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class TFMobileViTConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class TFMobileViTModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=32, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.last_hidden_size = last_hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, 
output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, hidden_sizes=[12, 16, 20], neck_hidden_sizes=[8, 8, 16, 16, 32, 32, 32], ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = TFMobileViTModel(config=config) result = model(pixel_values, training=False) expected_height = expected_width = self.image_size // self.output_stride self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.last_hidden_size, expected_height, expected_width) ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFMobileViTForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFMobileViTForSemanticSegmentation(config) expected_height = expected_width = self.image_size // self.output_stride result = model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, expected_height, expected_width) ) result = model(pixel_values, labels=pixel_labels, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, expected_height, expected_width) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFMobileViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (TFMobileViTModel, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFMobileViTModel, "image-classification": TFMobileViTForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_onnx = False def setUp(self): self.model_tester = TFMobileViTModelTester(self) self.config_tester = TFMobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() def check_keras_fit_results(self, val_loss1, val_loss2, atol=2e-1, rtol=2e-1): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFMobileViTModel` cannot operate with the default `fit()` method. if model_class.__name__ != "TFMobileViTModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): super().test_keras_fit() # The default test_loss_computation() uses -100 as a proxy ignore_index # to test masked losses. Overridding to avoid -100 since semantic segmentation # models use `semantic_loss_ignore_index` from the config. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # set an ignore index to correctly test the masked loss used in # `TFMobileViTForSemanticSegmentation`. 
if model_class.__name__ != "TFMobileViTForSemanticSegmentation": config.semantic_loss_ignore_index = 5 model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] expected_loss_size = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss when we mask some positions prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: # labels[0] = -100 prepared_for_class["labels"] = tf.convert_to_tensor(labels) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Get keys that were added with the _prepare_for_class function label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) @slow def test_model_from_pretrained(self): for model_name in TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFMobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFMobileViTModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_classification_head(self): model = TFMobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small") image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs, training=False) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-1.9364, -1.2327, -0.4653]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4, rtol=1e-04) @slow def test_inference_semantic_segmentation(self): # `from_pt` will be removed model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(inputs.pixel_values, training=False) logits = outputs.logits # verify the logits expected_shape = tf.TensorShape((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.constant( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] ) tf.debugging.assert_near(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/mobilevit/test_modeling_tf_mobilevit.py/0
{ "file_path": "transformers/tests/models/mobilevit/test_modeling_tf_mobilevit.py", "repo_id": "transformers", "token_count": 7977 }
394
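The TF MobileViT tests above check that each of the five hidden-state feature maps halves the spatial resolution of the previous one and that the final divisor matches the configured output stride. A tiny, hypothetical helper (not part of the test suite) that reproduces the expected sizes used in test_hidden_states_output:

def expected_feature_map_sizes(image_size, num_stages=5):
    # stage i is downsampled by 2 ** (i + 1) relative to the input, mirroring the loop in the test
    return [image_size // 2 ** (stage + 1) for stage in range(num_stages)]


print(expected_feature_map_sizes(32))  # [16, 8, 4, 2, 1] for the tester's image_size=32
# the final divisor, 2 ** num_stages = 32, is what the test compares against config.output_stride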
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Musicgen model. """ import copy import inspect import math import unittest import numpy as np from transformers import ( EncodecConfig, MusicgenConfig, MusicgenDecoderConfig, MusicgenProcessor, PretrainedConfig, T5Config, ) from transformers.testing_utils import ( is_torch_available, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MusicgenForCausalLM, MusicgenForConditionalGeneration, MusicgenModel, set_seed, ) from transformers.generation import ( GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput, InfNanRemoveLogitsProcessor, LogitsProcessorList, ) def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def prepare_musicgen_decoder_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])[:, 0, :] attention_mask = attention_mask.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if encoder_attention_mask is None and encoder_hidden_states is not None: encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "head_mask": head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MusicgenDecoderTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, pad_token_id=99, bos_token_id=99, num_codebooks=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training 
self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.num_codebooks = num_codebooks def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) config = self.get_config() inputs_dict = prepare_musicgen_decoder_inputs_dict( config, input_ids, encoder_hidden_states=encoder_hidden_states ) return config, inputs_dict def get_config(self): config = MusicgenDecoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, d_ff=self.intermediate_size, pad_token_id=self.pad_token_id, decoder_start_token_id=self.bos_token_id, bos_token_id=self.bos_token_id, num_codebooks=self.num_codebooks, tie_word_embeddings=False, ) return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MusicgenModel, MusicgenForCausalLM) if is_torch_available() else () greedy_sample_model_classes = ( (MusicgenForCausalLM,) if is_torch_available() else () ) # we don't want to run all the generation tests, only a specific subset pipeline_model_mapping = {} test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = MusicgenDecoderTester(self) self.config_tester = ConfigTester(self, config_class=MusicgenDecoderConfig, hidden_size=16) def test_config(self): self.config_tester.run_common_tests() # override since we have to compute the input embeddings over codebooks def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] embed_tokens = model.get_input_embeddings() input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1]) inputs["inputs_embeds"] = sum( [embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)] ) with torch.no_grad(): model(**inputs)[0] # override since we have embeddings / LM heads over multiple codebooks def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first_embed = model.get_input_embeddings()[0] self.assertIsInstance(first_embed, torch.nn.Embedding) lm_heads = model.get_output_embeddings() self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) # skip as this model doesn't support all arguments tested def test_model_outputs_equivalence(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tie_model_weights(self): pass # skip as 
this model has multiple inputs embeds and lm heads that should not be tied def test_tied_weights_keys(self): pass def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict["input_ids"] # take max batch_size sequence_length = input_ids.shape[-1] input_ids = input_ids[: batch_size * config.num_codebooks, :] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long) return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, } logits_processor = LogitsProcessorList() return process_kwargs, logits_processor # override since we don't expect the outputs of `.generate` and `.greedy_search` to be the same, since we perform # additional post-processing in the former def test_greedy_generate_dict_outputs(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) self.assertNotIn(config.pad_token_id, output_generate) # override since we don't expect the outputs of `.generate` and `.greedy_search` to be the same, since we perform # additional post-processing in the former def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.greedy_sample_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) # override since we don't expect the outputs of `.generate` and `.sample` to be the same, since we perform # additional post-processing in the former def test_sample_generate(self): for model_class in self.greedy_sample_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, 
input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertIsInstance(output_sample, torch.Tensor) self.assertIsInstance(output_generate, torch.Tensor) # override since we don't expect the outputs of `.generate` and `.sample` to be the same, since we perform # additional post-processing in the former def test_sample_generate_dict_output(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_sample, GenerateDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) def test_greedy_generate_stereo_outputs(self): for model_class in self.greedy_sample_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.audio_channels = 2 model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) self.assertNotIn(config.pad_token_id, output_generate) def prepare_musicgen_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.reshape( -1, config.decoder.num_codebooks, decoder_input_ids.shape[-1] )[:, 0, :] decoder_attention_mask = decoder_attention_mask.ne(config.decoder.pad_token_id) if head_mask is None: head_mask = torch.ones( config.text_encoder.num_hidden_layers, config.text_encoder.num_attention_heads, device=torch_device ) if decoder_head_mask is None: decoder_head_mask = torch.ones( config.decoder.num_hidden_layers, config.decoder.num_attention_heads, device=torch_device ) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones( config.decoder.num_hidden_layers, config.decoder.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, 
"cross_attn_head_mask": cross_attn_head_mask, } class MusicgenTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, pad_token_id=99, bos_token_id=99, num_codebooks=4, num_filters=4, codebook_size=128, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.num_codebooks = num_codebooks self.num_filters = num_filters self.codebook_size = codebook_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_musicgen_inputs_dict(config, input_ids, decoder_input_ids=decoder_input_ids) return config, inputs_dict def get_config(self): text_encoder_config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.intermediate_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, ) audio_encoder_config = EncodecConfig( hidden_size=self.vocab_size, compress=1, num_filters=self.num_filters, codebook_size=self.codebook_size, codebook_dim=self.vocab_size, ) decoder_config = MusicgenDecoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, pad_token_id=self.pad_token_id, decoder_start_token_id=self.bos_token_id, bos_token_id=self.bos_token_id, num_codebooks=self.num_codebooks, tie_word_embeddings=False, ) config = MusicgenConfig.from_sub_models_config(text_encoder_config, audio_encoder_config, decoder_config) return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else () greedy_sample_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {"text-to-audio": MusicgenForConditionalGeneration} if is_torch_available() else {} test_pruning = False # training is not supported yet for MusicGen test_headmasking = False test_resize_embeddings = False # not to test torchscript as the model tester doesn't prepare `input_values` and `padding_mask` # (and `torchscript` hates `None` values). 
test_torchscript = False def setUp(self): self.model_tester = MusicgenTester(self) def _check_output_with_attentions(self, outputs, config, input_ids, decoder_input_ids): text_encoder_config = config.text_encoder decoder_config = config.decoder encoder_attentions = outputs["encoder_attentions"] self.assertEqual(len(encoder_attentions), text_encoder_config.num_hidden_layers) self.assertEqual( encoder_attentions[0].shape[-3:], (text_encoder_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) decoder_attentions = outputs["decoder_attentions"] num_decoder_layers = decoder_config.num_hidden_layers self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, input_ids.shape[-1]), ) def check_musicgen_model_output_attentions( self, model_class, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs, ): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, **kwargs, ) self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) def check_musicgen_model_output_attentions_from_config( self, model_class, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs, ): # Similar to `check_musicgen_model_output_attentions`, but with `output_attentions` triggered from the # config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded # from the inner models' configurations. 
config.output_attentions = True # model config -> won't work model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, **kwargs, ) self.assertTrue( all(key not in outputs for key in ["encoder_attentions", "decoder_attentions", "cross_attentions"]) ) config.text_encoder.output_attentions = True # inner model config -> will work config.audio_encoder.output_attentions = True config.decoder.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, **kwargs, ) self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) # override since changing `output_attentions` from the top-level model config won't work def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.check_musicgen_model_output_attentions(model_class, config, **inputs_dict) self.check_musicgen_model_output_attentions_from_config(model_class, config, **inputs_dict) # override since we have a specific forward signature for musicgen def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_ids", "attention_mask", "input_values", "padding_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # override since changing `gradient_checkpointing` from the top-level model config won't work def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.text_encoder.gradient_checkpointing = True config.audio_encoder.gradient_checkpointing = True config.decoder.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tie_model_weights(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tied_model_weights_key_ignore(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tied_weights_keys(self): pass # override since changing `output_hidden_states` / `output_attentions` from the top-level model config won't work def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.text_encoder.output_hidden_states = True config.audio_encoder.output_hidden_states = True config.decoder.output_hidden_states = True config.text_encoder.output_attentions = 
True config.decoder.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) # override since changing `output_hidden_states` from the top-level model config won't work def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.text_encoder.output_hidden_states = True config.audio_encoder.output_hidden_states = True config.decoder.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # override since the conv layers and lstm's in encodec are exceptions def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv"] ignore_init = ["lstm"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif not any(x in name for x in ignore_init): self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # override since we have embeddings / LM heads over multiple codebooks def test_model_common_attributes(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), torch.nn.Embedding) lm_heads = model.get_output_embeddings() self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict["input_ids"] # take max batch_size sequence_length = input_ids.shape[-1] input_ids = input_ids[:batch_size, :] attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long) # generate max 3 tokens decoder_input_ids = inputs_dict["decoder_input_ids"] max_length = decoder_input_ids.shape[-1] + 3 decoder_input_ids = decoder_input_ids[: batch_size * config.decoder.num_codebooks, :] return config, input_ids, attention_mask, decoder_input_ids, max_length # override since the `input_ids` cannot be used as the `decoder_input_ids` for musicgen (input / outputs are # different modalities -> different shapes) def _greedy_generate( self, model, input_ids, attention_mask, decoder_input_ids, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( decoder_input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, encoder_outputs=encoder_outputs, **model_kwargs, ) return output_greedy, output_generate # override since the `input_ids` cannot be used as the `decoder_input_ids` for musicgen (input / outputs are # different modalities -> different shapes) def _sample_generate( self, model, input_ids, attention_mask, decoder_input_ids, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_warper_kwargs, 
**process_kwargs, **model_kwargs, ) torch.manual_seed(0) encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( decoder_input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, encoder_outputs=encoder_outputs, **model_kwargs, ) return output_sample, output_generate @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, } logits_processor = LogitsProcessorList() return process_kwargs, logits_processor def test_greedy_generate_dict_outputs(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), decoder_input_ids=decoder_input_ids, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GenerateEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) self.assertNotIn(config.pad_token_id, output_generate) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.greedy_sample_model_classes: # enable cache config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), decoder_input_ids=decoder_input_ids, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GenerateEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) def test_sample_generate(self): for model_class in self.greedy_sample_model_classes: config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, 
input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), decoder_input_ids=decoder_input_ids, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertIsInstance(output_sample, torch.Tensor) self.assertIsInstance(output_generate, torch.Tensor) def test_sample_generate_dict_output(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), decoder_input_ids=decoder_input_ids, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_sample, GenerateEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) def test_generate_without_input_ids(self): config, _, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.greedy_sample_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.greedy_sample_model_classes: model = model_class(config).eval().to(torch_device) model.half() # greedy model.generate(input_dict["input_ids"], attention_mask=input_dict["attention_mask"], max_new_tokens=10) # sampling model.generate( input_dict["input_ids"], attention_mask=input_dict["attention_mask"], do_sample=True, max_new_tokens=10 ) def test_greedy_generate_stereo_outputs(self): for model_class in self.greedy_sample_model_classes: config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.audio_channels = 2 model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids.to(torch_device), attention_mask=attention_mask.to(torch_device), decoder_input_ids=decoder_input_ids, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GenerateEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) self.assertNotIn(config.pad_token_id, output_generate) def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000): """Produces a series of 'bip bip' sounds at a given frequency.""" timesteps = np.arange(int(duration * 
sample_rate)) / sample_rate wav = np.cos(2 * math.pi * 440 * timesteps) time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration) envelope = time_period >= 0.5 return wav * envelope def place_dict_on_device(dict_to_place, device): for key in dict_to_place: if dict_to_place[key] is not None and isinstance(dict_to_place[key], torch.Tensor): dict_to_place[key] = dict_to_place[key].to(device) return dict_to_place @require_torch class MusicgenIntegrationTests(unittest.TestCase): @cached_property def model(self): return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").to(torch_device) @cached_property def processor(self): return MusicgenProcessor.from_pretrained("facebook/musicgen-small") @slow def test_logits_text_prompt(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) # prepare the decoder inputs pad_token_id = model.generation_config.pad_token_id decoder_input_ids = ( torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device) * pad_token_id ) with torch.no_grad(): logits = model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, ).logits # fmt: off EXPECTED_LOGITS = torch.tensor( [ -0.9708, -3.0149, -4.6415, -1.4754, -0.2786, -2.3523, -2.6049, -6.7467, -1.0206, -3.2984, -3.3968, -1.5108, -1.5786, -3.1493, -1.1503, -0.0545, ] ) # fmt: on self.assertTrue(logits.shape == (*decoder_input_ids.shape, model.decoder.config.vocab_size)) self.assertTrue(torch.allclose(logits[0, 0, :16].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_logits_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") # prepare the text encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) # prepare the audio encoder inputs input_values = inputs.input_values.to(torch_device) padding_mask = inputs.padding_mask.to(torch_device) with torch.no_grad(): logits = model( input_ids, attention_mask=attention_mask, input_values=input_values, padding_mask=padding_mask, ).logits # fmt: off EXPECTED_LOGITS = torch.tensor( [ 0.1841, -2.9324, -0.7898, 0.1857, 0.4971, -2.8685, -1.6525, -1.6541, 2.7757, -2.5942, -3.0959, -1.0120, -1.0147, -0.4605, -0.8885, 0.6820, ] ) # fmt: on self.assertTrue(logits.shape == (8, 50, 2048)) self.assertTrue(torch.allclose(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_generate_unconditional_greedy(self): model = self.model # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same unconditional_inputs = model.get_unconditional_inputs(num_samples=1) unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=5) # fmt: off EXPECTED_VALUES = torch.tensor( [ 0.0056, 0.0064, 0.0063, 0.0054, 0.0042, 0.0033, 0.0024, 0.0015, 0.0015, 0.0010, 0.0004, -0.0012, -0.0036, -0.0055, -0.0067, -0.0071, ] ) # fmt: on self.assertTrue(output_values.shape == (1, 1, 3200)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def 
test_generate_unconditional_sampling(self): model = self.model # for stochastic sampling we can generate multiple outputs unconditional_inputs = model.get_unconditional_inputs(num_samples=2) unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) set_seed(0) output_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=10) # fmt: off EXPECTED_VALUES = torch.tensor( [ -0.0099, -0.0140, 0.0079, 0.0080, -0.0046, 0.0065, -0.0068, -0.0185, 0.0105, 0.0059, 0.0329, 0.0249, -0.0204, -0.0341, -0.0465, 0.0053, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_prompt_greedy(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=None, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ -1.1998e-04, -2.2302e-04, 4.6296e-04, 1.0524e-03, 2.4827e-04, -4.0288e-05, -1.2468e-04, 4.9846e-05, 7.1485e-04, 4.4197e-04, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_prompt_greedy_with_classifier_free_guidance(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=3, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ 0.0283, 0.0246, 0.0650, 0.0640, 0.0599, 0.0711, 0.0420, 0.0112, 0.0511, 0.0746, 0.1363, 0.1213, 0.0185, -0.0578, -0.0908, 0.0443, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_prompt_sampling(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) set_seed(0) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=True, guidance_scale=None, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ -0.0111, -0.0154, 0.0047, 0.0058, -0.0068, 0.0012, -0.0109, -0.0229, 0.0010, -0.0038, 0.0167, 0.0042, -0.0421, -0.0610, -0.0764, -0.0326, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") inputs = place_dict_on_device(inputs, device=torch_device) output_values = model.generate(**inputs, do_sample=False, guidance_scale=None, max_new_tokens=10) # fmt: off 
EXPECTED_VALUES = torch.tensor( [ -0.0036, -0.0130, -0.0261, -0.0384, -0.0557, -0.0718, -0.0680, -0.0632, -0.0529, -0.0403, -0.0289, -0.0198, -0.0136, -0.0101, -0.0095, -0.0040, ] ) # fmt: on self.assertTrue( output_values.shape == (2, 1, 36480) ) # input values take shape 32000 and we generate from there self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES, atol=1e-4)) @require_torch class MusicgenStereoIntegrationTests(unittest.TestCase): @cached_property def model(self): return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-stereo-small").to(torch_device) @cached_property def processor(self): return MusicgenProcessor.from_pretrained("facebook/musicgen-stereo-small") @slow def test_generate_unconditional_greedy(self): model = self.model # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same unconditional_inputs = model.get_unconditional_inputs(num_samples=1) unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12) # fmt: off EXPECTED_VALUES_LEFT = torch.tensor( [ 0.0017, 0.0004, 0.0004, 0.0005, 0.0002, 0.0002, -0.0002, -0.0013, -0.0010, -0.0015, -0.0018, -0.0032, -0.0060, -0.0082, -0.0096, -0.0099, ] ) EXPECTED_VALUES_RIGHT = torch.tensor( [ 0.0038, 0.0028, 0.0031, 0.0032, 0.0031, 0.0032, 0.0030, 0.0019, 0.0021, 0.0015, 0.0009, -0.0008, -0.0040, -0.0067, -0.0087, -0.0096, ] ) # fmt: on # (bsz, channels, seq_len) self.assertTrue(output_values.shape == (1, 2, 5760)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, atol=1e-4)) self.assertTrue(torch.allclose(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_RIGHT, atol=1e-4)) @slow def test_generate_text_audio_prompt(self): model = self.model processor = self.processor # create stereo inputs audio = [get_bip_bip(duration=0.5)[None, :].repeat(2, 0), get_bip_bip(duration=1.0)[None, :].repeat(2, 0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") inputs = place_dict_on_device(inputs, device=torch_device) output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12) # fmt: off EXPECTED_VALUES_LEFT = torch.tensor( [ 0.2535, 0.2008, 0.1471, 0.0896, 0.0306, -0.0200, -0.0501, -0.0728, -0.0832, -0.0856, -0.0867, -0.0884, -0.0864, -0.0866, -0.0744, -0.0430, ] ) EXPECTED_VALUES_RIGHT = torch.tensor( [ 0.1695, 0.1213, 0.0732, 0.0239, -0.0264, -0.0705, -0.0935, -0.1103, -0.1163, -0.1139, -0.1104, -0.1082, -0.1027, -0.1004, -0.0900, -0.0614, ] ) # fmt: on # (bsz, channels, seq_len) self.assertTrue(output_values.shape == (2, 2, 37760)) # input values take shape 32000 and we generate from there - we check the last (generated) values self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES_LEFT, atol=1e-4)) self.assertTrue(torch.allclose(output_values[0, 1, -16:].cpu(), EXPECTED_VALUES_RIGHT, atol=1e-4))
transformers/tests/models/musicgen/test_modeling_musicgen.py/0
{ "file_path": "transformers/tests/models/musicgen/test_modeling_musicgen.py", "repo_id": "transformers", "token_count": 28053 }
395
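The MusicGen integration tests above pin exact logits and waveform values for the facebook/musicgen-small checkpoint. For orientation, here is a minimal sketch of the same text-prompted greedy generation path outside the test harness; the checkpoint name, prompts, and generation arguments are taken from the tests, while the device handling is illustrative, and the shape comment simply restates what test_generate_text_prompt_greedy asserts.

import torch
from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").to(device)

# Same prompts and greedy settings as test_generate_text_prompt_greedy above.
inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt")
input_ids = inputs.input_ids.to(device)
attention_mask = inputs.attention_mask.to(device)

output_values = model.generate(
    input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=None, max_new_tokens=10
)
# output_values is a waveform batch of shape (batch_size, num_channels, num_samples),
# e.g. (2, 1, 4480) for this checkpoint and max_new_tokens=10, per the assertion in the test above.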
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Owlv2 model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Owlv2ForObjectDetection, Owlv2Model, Owlv2TextModel, Owlv2VisionModel from transformers.models.owlv2.modeling_owlv2 import OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import OwlViTProcessor # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTVisionModelTester with OwlViT->Owlv2 class Owlv2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Owlv2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = Owlv2VisionModel(config=config).to(torch_device) model.eval() pixel_values = pixel_values.to(torch.float32) with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = 
(self.image_size // self.patch_size) ** 2 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTVisionModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2VisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as OWLV2 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Owlv2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Owlv2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Owlv2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Owlv2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Owlv2VisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Owlv2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTTextModelTester with OwlViT->Owlv2 class Owlv2TextModelTester: def __init__( self, parent, 
batch_size=12, num_queries=4, seq_length=16, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=12, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=16, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_queries = num_queries self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_queries, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size * self.num_queries, self.seq_length]) if input_mask is not None: num_text, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(num_text,)) for idx, start_index in enumerate(rnd_start_indices): input_mask[idx, :start_index] = 1 input_mask[idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Owlv2TextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = Owlv2TextModel(config=config).to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids=input_ids, attention_mask=input_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size * self.num_queries, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_queries, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTTextModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2TextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Owlv2TextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = Owlv2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Owlv2TextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients 
properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Owlv2TextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Owlv2TextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Owlv2TextModel.from_pretrained(model_name) self.assertIsNotNone(model) class Owlv2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Owlv2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Owlv2VisionModelTester(parent, **vision_kwargs) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return Owlv2Config.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = Owlv2Model(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, ) image_logits_size = ( self.vision_model_tester.batch_size, self.text_model_tester.batch_size * self.text_model_tester.num_queries, ) text_logits_size = ( self.text_model_tester.batch_size * self.text_model_tester.num_queries, self.vision_model_tester.batch_size, ) self.parent.assertEqual(result.logits_per_image.shape, image_logits_size) self.parent.assertEqual(result.logits_per_text.shape, text_logits_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "return_loss": False, } return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTModelTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Owlv2Model,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": Owlv2Model, "zero-shot-object-detection": Owlv2ForObjectDetection} if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = Owlv2ModelTester(self) def test_model(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Owlv2Model does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for OWLV2 def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLV2 needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save Owlv2Config and check if we can load Owlv2VisionConfig from it with tempfile.TemporaryDirectory() 
as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Owlv2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Owlv2Config and check if we can load Owlv2TextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = Owlv2TextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Owlv2Model.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTForObjectDetectionTester with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2ForObjectDetectionTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = Owlv2TextModelTester(parent) self.vision_model_tester = Owlv2VisionModelTester(parent) self.is_training = is_training self.text_config = self.text_model_tester.get_config().to_dict() self.vision_config = self.vision_model_tester.get_config().to_dict() def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values, input_ids, attention_mask def get_config(self): return Owlv2Config.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64) def create_and_check_model(self, config, pixel_values, input_ids, attention_mask): model = Owlv2ForObjectDetection(config).to(torch_device).eval() with torch.no_grad(): result = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, return_dict=True, ) pred_boxes_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_logits_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, 4, ) pred_class_embeds_size = ( self.vision_model_tester.batch_size, (self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2, self.text_model_tester.hidden_size, ) self.parent.assertEqual(result.pred_boxes.shape, pred_boxes_size) self.parent.assertEqual(result.logits.shape, pred_logits_size) self.parent.assertEqual(result.class_embeds.shape, pred_class_embeds_size) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, input_ids, attention_mask = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTForObjectDetectionTest with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2ForObjectDetectionTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Owlv2ForObjectDetection,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = Owlv2ForObjectDetectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states 
is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Owlv2Model does not have input/output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="Test_initialization is tested in individual model tests") def test_initialization(self): pass @unittest.skip(reason="Test_forward_signature is tested in individual model tests") def test_forward_signature(self): pass @unittest.skip(reason="Test_save_load_fast_init_from_base is tested in individual model tests") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @unittest.skip(reason="OwlV2 does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init).to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # OWLV2 needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") loaded_model = loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): for model_name in OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
Owlv2ForObjectDetection.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class Owlv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/owlv2-base-patch16" model = Owlv2Model.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-6.2229, -8.2601]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) @slow def test_inference_object_detection(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=[["a photo of a cat", "a photo of a dog"]], images=image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_logits = torch.tensor( [[-21.413497, -21.612638], [-19.008193, -19.548841], [-20.958896, -21.382694]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_slice_boxes = torch.tensor( [[0.241309, 0.051896, 0.453267], [0.139474, 0.045701, 0.250660], [0.233022, 0.050479, 0.427671]], ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow def test_inference_one_shot_object_detection(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device) processor = OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4))) expected_slice_boxes = torch.tensor( [[0.2413, 0.0519, 0.4533], [0.1395, 0.0457, 0.2507], [0.2330, 0.0505, 0.4277]], ).to(torch_device) self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) @slow @require_torch_accelerator @require_torch_fp16 def test_inference_one_shot_object_detection_fp16(self): model_name = "google/owlv2-base-patch16" model = Owlv2ForObjectDetection.from_pretrained(model_name, torch_dtype=torch.float16).to(torch_device) processor = 
OwlViTProcessor.from_pretrained(model_name) image = prepare_img() query_image = prepare_img() inputs = processor( images=image, query_images=query_image, max_length=16, padding="max_length", return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model.image_guided_detection(**inputs) # No need to check the logits, we just check inference runs fine. num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
transformers/tests/models/owlv2/test_modeling_owlv2.py/0
{ "file_path": "transformers/tests/models/owlv2/test_modeling_owlv2.py", "repo_id": "transformers", "token_count": 15766 }
396
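The OWLv2 integration tests above check raw detection logits and predicted boxes for text-conditioned queries. Below is a minimal sketch of the same forward pass, assuming the google/owlv2-base-patch16 checkpoint name and the OwlViTProcessor usage taken from test_inference_object_detection; the shape comments restate what that test asserts rather than adding new guarantees.

import requests
import torch
from PIL import Image
from transformers import Owlv2ForObjectDetection, OwlViTProcessor

model_name = "google/owlv2-base-patch16"  # checkpoint name taken from the tests above
processor = OwlViTProcessor.from_pretrained(model_name)
model = Owlv2ForObjectDetection.from_pretrained(model_name)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]],
    images=image,
    max_length=16,
    padding="max_length",
    return_tensors="pt",
)

with torch.no_grad():
    outputs = model(**inputs)

num_patches = (model.config.vision_config.image_size // model.config.vision_config.patch_size) ** 2
# outputs.logits: (batch, num_patches, num_text_queries); outputs.pred_boxes: (batch, num_patches, 4)
print(outputs.logits.shape, outputs.pred_boxes.shape, num_patches)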
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class FlaxRegNetModelTester(unittest.TestCase): def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, ) def create_and_check_model(self, config, pixel_values): model = FlaxRegNetModel(config=config) result = model(pixel_values) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values): config.num_labels = self.num_labels model = FlaxRegNetForImageClassification(config=config) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class FlaxResNetModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () is_encoder_decoder = False test_head_masking = False has_attentions = False def setUp(self) -> None: self.model_tester = FlaxRegNetModelTester(self) self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False) def 
test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="RegNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="RegNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(pixel_values, **kwargs): return model(pixel_values=pixel_values, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_flax class FlaxRegNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = 
FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="np") outputs = model(**inputs) # verify the logits expected_shape = (1, 1000) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/regnet/test_modeling_flax_regnet.py/0
{ "file_path": "transformers/tests/models/regnet/test_modeling_flax_regnet.py", "repo_id": "transformers", "token_count": 3717 }
397
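The Flax RegNet integration test above checks a logits slice for facebook/regnet-y-040. A minimal classification sketch along the same lines follows; the imports and checkpoint mirror the test, while the image path and the use of config.id2label for a readable label are illustrative assumptions.

import jax.numpy as jnp
from PIL import Image
from transformers import AutoImageProcessor
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification

image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # same fixture path as the test
inputs = image_processor(images=image, return_tensors="np")
outputs = model(**inputs)  # logits shape (1, 1000), as the integration test asserts

predicted_class_idx = int(jnp.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class_idx])  # assumes the checkpoint ships an id2label mapping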
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow SAM model. """ from __future__ import annotations import inspect import unittest import numpy as np import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import SamProcessor, TFSamModel from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image class TFSamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class TFSamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps def get_config(self): return SamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class TFSamModelTester: def __init__( self, parent, hidden_size=36, 
intermediate_size=72, projection_dim=62, output_channels=32, num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = TFSamPromptEncoderTester() self.mask_decoder_tester = TFSamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = TFSamModel(config=config) result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = TFSamModel(config=config) result = model.get_image_embeddings(pixel_values) self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12)) def create_and_check_get_image_hidden_states(self, config, pixel_values): model = TFSamModel(config=config) result = model.vision_encoder( pixel_values, 
output_hidden_states=True, return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=False, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFSamModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFSamModel,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFSamModel, "mask-generation": TFSamModel} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = TFSamModelTester(self) self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False) self.prompt_encoder_config_tester = ConfigTester( self, config_class=SamPromptEncoderConfig, has_text_modality=False, num_attention_heads=12, num_hidden_layers=2, ) self.mask_decoder_config_tester = ConfigTester( self, config_class=SamMaskDecoderConfig, has_text_modality=False ) def test_config(self): self.vision_config_tester.run_common_tests() self.prompt_encoder_config_tester.run_common_tests() self.mask_decoder_config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass @slow def test_model_from_pretrained(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") # sam-vit-huge blows out our memory self.assertIsNotNone(model) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-4, name="outputs", attributes=None): super().check_pt_tf_outputs( tf_outputs=tf_outputs, pt_outputs=pt_outputs, model_class=model_class, tol=tol, name=name, attributes=attributes, ) def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @require_tf @slow class TFSamModelIntegrationTest(unittest.TestCase): def test_inference_mask_generation_no_point(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.4515), atol=2e-4)) self.assertTrue(np.allclose(masks.numpy(), np.array([-4.1807, -3.4949, -3.4483]), atol=1e-2)) def test_inference_mask_generation_one_point_one_bb(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() 
input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] inputs = processor(images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(np.allclose(scores[-1], np.array(0.9566), atol=2e-4)) self.assertTrue(np.allclose(masks.numpy(), np.array([-12.7657, -12.3683, -12.5985]), atol=2e-2)) def test_inference_mask_generation_batched_points_batched_images(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) masks = outputs.pred_masks[0, 0, 0, 0, :3] EXPECTED_SCORES = np.array( [ [ [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], [ [0.3317, 0.7264, 0.7646], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], ] ) EXPECTED_MASKS = np.array([-2.8552, -2.7990, -2.9612]) self.assertTrue(np.allclose(scores.numpy(), EXPECTED_SCORES, atol=1e-3)) self.assertTrue(np.allclose(masks.numpy(), EXPECTED_MASKS, atol=3e-2)) def test_inference_mask_generation_one_point_one_bb_zero(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="tf", ) outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7894), atol=1e-4)) def test_inference_mask_generation_one_point(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1], np.array(0.9675), atol=1e-4)) # With no label input_points = [[[400, 650]]] inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9675), atol=1e-4)) def test_inference_mask_generation_two_points(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4)) # no labels inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) 
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4)) def test_inference_mask_generation_two_points_batched(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [[[400, 650], [800, 650]], [[400, 650]]] input_labels = [[1, 1], [1]] inputs = processor( images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="tf" ) outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[0][-1].numpy(), np.array(0.9762), atol=1e-4)) self.assertTrue(np.allclose(scores[1][-1], np.array(0.9637), atol=1e-4)) def test_inference_mask_generation_one_box(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_boxes = [[[75, 275, 1725, 850]]] inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7937), atol=1e-4)) def test_inference_mask_generation_batched_image_one_point(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() raw_dog_image = prepare_dog_img() input_points = [[[820, 1080]], [[220, 470]]] inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores_batched = tf.squeeze(outputs.iou_scores) input_points = [[[220, 470]]] inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores_single = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores_batched[1, :].numpy(), scores_single.numpy(), atol=1e-4)) def test_inference_mask_generation_two_points_point_batch(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = tf.convert_to_tensor([[[400, 650]], [[220, 470]]]) # fmt: skip input_points = tf.expand_dims(input_points, 0) inputs = processor(raw_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) iou_scores = outputs.iou_scores self.assertTrue(iou_scores.shape == (1, 2, 3)) self.assertTrue( np.allclose( iou_scores.numpy(), np.array([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4, ) ) def test_inference_mask_generation_three_boxes_point_batch(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() # fmt: off input_boxes = tf.convert_to_tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]) EXPECTED_IOU = np.array([[[0.9773, 0.9881, 0.9522], [0.5996, 0.7661, 0.7937], [0.5996, 0.7661, 0.7937]]]) # fmt: on input_boxes = tf.expand_dims(input_boxes, 0) inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="tf") outputs = model(**inputs) iou_scores = outputs.iou_scores self.assertTrue(iou_scores.shape == (1, 3, 3)) self.assertTrue(np.allclose(iou_scores.numpy(), EXPECTED_IOU, atol=1e-4, rtol=1e-4))
transformers/tests/models/sam/test_modeling_tf_sam.py/0
{ "file_path": "transformers/tests/models/sam/test_modeling_tf_sam.py", "repo_id": "transformers", "token_count": 11717 }
398
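# Minimal usage sketch (not part of the test suite above): the point-prompted inference
# flow that TFSamModelIntegrationTest exercises. It assumes TensorFlow, Pillow, requests
# and the `facebook/sam-vit-base` checkpoint are available; the image URL is the same car
# picture the tests download, and the prompt coordinates are arbitrary.
import requests
import tensorflow as tf
from PIL import Image

from transformers import SamProcessor, TFSamModel

model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

input_points = [[[400, 650]]]  # one (x, y) point prompt for one image

inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)

scores = tf.squeeze(outputs.iou_scores)  # one IoU estimate per predicted mask
masks = outputs.pred_masks               # low-resolution mask logits, one per prediction
print(scores.numpy(), masks.shape)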
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Hubert model. """ import math import unittest import pytest from transformers import SEWDConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SEWDForCTC, SEWDForSequenceClassification, SEWDModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.hubert.modeling_hubert import _compute_mask_indices class SEWDModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=32, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(64, 32, 32), conv_stride=(5, 2, 1), conv_kernel=(10, 3, 1), conv_bias=False, num_conv_pos_embeddings=31, num_conv_pos_embedding_groups=2, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, position_biased_input=False, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", num_hidden_layers=2, num_attention_heads=2, hidden_dropout=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.squeeze_factor = squeeze_factor self.max_position_embeddings = max_position_embeddings self.position_buckets = position_buckets self.share_att_key = share_att_key self.relative_attention = relative_attention self.position_biased_input = position_biased_input self.pos_att_type = pos_att_type self.norm_rel_ebd = norm_rel_ebd self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = 
int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length // self.squeeze_factor def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return SEWDConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, squeeze_factor=self.squeeze_factor, max_position_embeddings=self.max_position_embeddings, position_buckets=self.position_buckets, share_att_key=self.share_att_key, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, norm_rel_ebd=self.norm_rel_ebd, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout=self.hidden_dropout, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, input_values, attention_mask): model = SEWDModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = SEWDModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = SEWDForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, 
attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWDForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_loss(self, config, input_values, *args): model = SEWDForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = SEWDForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = SEWDForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class SEWDModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SEWDForCTC, SEWDModel, SEWDForSequenceClassification) if is_torch_available() else () pipeline_model_mapping 
= ( { "audio-classification": SEWDForSequenceClassification, "automatic-speech-recognition": SEWDForCTC, "feature-extraction": SEWDModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = SEWDModelTester(self) self.config_tester = ConfigTester(self, config_class=SEWDConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Hubert has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # SEW cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # SEW has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "quantizer.weight_proj.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, 
"weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k") self.assertIsNotNone(model) @require_torch class SEWDUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_torch @require_soundfile @slow class SEWDModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_pretrained_batched(self): model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-d-tiny-100k") input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): outputs = model(input_values).last_hidden_state # expected outputs taken from the original SEW-D implementation expected_outputs_first = torch.tensor( [ [ [-0.1619, 0.6995, 0.4062, -0.1014], [-0.1364, 0.5960, 0.0952, -0.0873], [-0.1572, 0.5718, 0.4228, -0.0864], [-0.1325, 0.6823, 0.1387, -0.0871], ], [ [-0.1296, 0.4008, 0.4952, -0.1450], [-0.1152, 0.3693, 0.3037, -0.1290], [-0.1194, 0.6074, 0.3531, -0.1466], [-0.1113, 0.3135, 0.2224, -0.1338], ], ], device=torch_device, ) expected_outputs_last = torch.tensor( [ [ [-0.1577, 0.5108, 0.8553, 0.2550], [-0.1530, 0.3580, 0.6143, 0.2672], [-0.1535, 0.4954, 0.8503, 0.1387], [-0.1572, 0.3363, 0.6217, 0.1490], ], [ [-0.1338, 0.5459, 0.9607, -0.1133], [-0.1502, 0.3738, 0.7313, -0.0986], [-0.0953, 0.4708, 1.0821, -0.0944], [-0.1474, 0.3598, 0.7248, -0.0748], ], ], device=torch_device, ) expected_output_sum = 54201.0469 self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=1e-3)) self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=1e-3)) self.assertTrue(abs(outputs.sum() - expected_output_sum) < 1) def 
test_inference_ctc_batched(self): model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "swet covered breon's body trickling into the titlowing closs that was the only garmened he war", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
transformers/tests/models/sew_d/test_modeling_sew_d.py/0
{ "file_path": "transformers/tests/models/sew_d/test_modeling_sew_d.py", "repo_id": "transformers", "token_count": 10461 }
399
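# Minimal usage sketch (not part of the test suite above): the CTC decoding path that
# SEWDModelIntegrationTest.test_inference_ctc_batched exercises. It assumes torch and the
# `asapp/sew-d-tiny-100k-ft-ls100h` checkpoint are available; the silent dummy waveform is
# only a stand-in for real 16 kHz mono speech.
import numpy as np
import torch

from transformers import SEWDForCTC, Wav2Vec2Processor

model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h")
processor = Wav2Vec2Processor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h", do_lower_case=True)

speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz (placeholder)

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))  # greedy CTC transcription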
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import json
import os
import tempfile
import unittest

from transformers.models.speech_to_text_2 import Speech2Text2Tokenizer
from transformers.models.speech_to_text_2.tokenization_speech_to_text_2 import VOCAB_FILES_NAMES

from ...test_tokenization_common import TokenizerTesterMixin


class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2Text2Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = "<s> <pad> </s> <unk> here@@ a couple of@@ words for the he@@ re@@ vocab".split(" ")
        merges = ["he re</w> 123", "here a 1456"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "vocab")
        self.assertEqual(len(vocab_keys), 14)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 14)

    def test_tokenizer_decode(self):
        tokenizer = Speech2Text2Tokenizer.from_pretrained(self.tmpdirname)

        # make sure @@ is correctly concatenated
        token_ids = [4, 6, 8, 7, 10]  # ["here@@", "couple", "words", "of@@", "the"]
        output_string = tokenizer.decode(token_ids)

        self.assertTrue(output_string == "herecouple words ofthe")

    def test_load_no_merges_file(self):
        tokenizer = Speech2Text2Tokenizer.from_pretrained(self.tmpdirname)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            tokenizer.save_pretrained(tmp_dirname)
            os.remove(os.path.join(tmp_dirname, "merges.txt"))

            # load tokenizer without merges file should not throw an error
            tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dirname)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            # save tokenizer and load again
            tokenizer.save_pretrained(tmp_dirname)
            tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dirname)

        self.assertIsNotNone(tokenizer)

    # overwrite since merges_file is optional
    def test_tokenizer_slow_store_full_signature(self):
        if not self.test_slow_tokenizer:
            return

        signature = inspect.signature(self.tokenizer_class.__init__)
        tokenizer = self.get_tokenizer()

        for parameter_name, parameter in signature.parameters.items():
            if parameter.default != inspect.Parameter.empty and parameter_name != "merges_file":
                self.assertIn(parameter_name, tokenizer.init_kwargs)
transformers/tests/models/speech_to_text_2/test_tokenization_speech_to_text_2.py/0
{ "file_path": "transformers/tests/models/speech_to_text_2/test_tokenization_speech_to_text_2.py", "repo_id": "transformers", "token_count": 1531 }
400
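# Minimal usage sketch (not part of the test suite above): how the toy BPE vocabulary used
# in SpeechToTextTokenizerTest.setUp can be written to disk and loaded back with
# Speech2Text2Tokenizer. File names follow the VOCAB_FILES_NAMES convention the test imports;
# the vocab, merges, and token ids are the same toy values the test asserts against.
import json
import os
import tempfile

from transformers.models.speech_to_text_2 import Speech2Text2Tokenizer
from transformers.models.speech_to_text_2.tokenization_speech_to_text_2 import VOCAB_FILES_NAMES

tmpdir = tempfile.mkdtemp()
vocab = "<s> <pad> </s> <unk> here@@ a couple of@@ words for the he@@ re@@ vocab".split(" ")
with open(os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"]), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
with open(os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"]), "w") as fp:
    fp.write("\n".join(["he re</w> 123", "here a 1456"]))

tokenizer = Speech2Text2Tokenizer.from_pretrained(tmpdir)
# "@@" marks word-internal BPE pieces, so decoding glues them onto the following token.
print(tokenizer.decode([4, 6, 8, 7, 10]))  # -> "herecouple words ofthe"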
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import unittest import numpy as np import pandas as pd from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TapasConfig, TapasTokenizer, is_tf_available, ) from transformers.models.auto import get_values from transformers.testing_utils import require_tensorflow_probability, require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, ) from transformers.models.tapas.modeling_tf_tapas import ( IndexMap, ProductIndexMap, flatten, gather, range_index_map, reduce_max, reduce_mean, reduce_sum, ) class TFTapasModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=512, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], type_sequence_label_size=2, positive_weight=10.0, num_aggregation_labels=4, num_labels=2, aggregation_loss_importance=0.8, use_answer_as_supervision=True, answer_loss_importance=0.001, use_normalized_answer_loss=False, huber_loss_delta=25.0, temperature=1.0, agg_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_agg=False, average_approximation_function="ratio", cell_selection_preference=0.5, answer_loss_cutoff=100, max_num_rows=64, max_num_columns=32, average_logits_per_cell=True, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=True, reset_position_index_per_cell=True, disable_per_token_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob 
self.initializer_range = initializer_range self.max_position_embeddings = max_position_embeddings self.type_vocab_sizes = type_vocab_sizes self.type_sequence_label_size = type_sequence_label_size self.positive_weight = positive_weight self.num_aggregation_labels = num_aggregation_labels self.num_labels = num_labels self.aggregation_loss_importance = aggregation_loss_importance self.use_answer_as_supervision = use_answer_as_supervision self.answer_loss_importance = answer_loss_importance self.use_normalized_answer_loss = use_normalized_answer_loss self.huber_loss_delta = huber_loss_delta self.temperature = temperature self.agg_temperature = agg_temperature self.use_gumbel_for_cells = use_gumbel_for_cells self.use_gumbel_for_agg = use_gumbel_for_agg self.average_approximation_function = average_approximation_function self.cell_selection_preference = cell_selection_preference self.answer_loss_cutoff = answer_loss_cutoff self.max_num_rows = max_num_rows self.max_num_columns = max_num_columns self.average_logits_per_cell = average_logits_per_cell self.select_one_column = select_one_column self.allow_empty_column_selection = allow_empty_column_selection self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero self.reset_position_index_per_cell = reset_position_index_per_cell self.disable_per_token_loss = disable_per_token_loss self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = [] for type_vocab_size in self.type_vocab_sizes: token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size)) token_type_ids = tf.stack(token_type_ids, axis=2) sequence_labels = None token_labels = None labels = None numeric_values = None numeric_values_scale = None float_answer = None aggregation_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) numeric_values = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) numeric_values_scale = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) float_answer = ids_tensor([self.batch_size], vocab_size=2, dtype=tf.float32) aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels) config = self.get_config() return ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) def get_config(self): return TapasConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_sizes=self.type_vocab_sizes, initializer_range=self.initializer_range, positive_weight=self.positive_weight, num_aggregation_labels=self.num_aggregation_labels, num_labels=self.num_labels, aggregation_loss_importance=self.aggregation_loss_importance, use_answer_as_supervision=self.use_answer_as_supervision, 
answer_loss_importance=self.answer_loss_importance, use_normalized_answer_loss=self.use_normalized_answer_loss, huber_loss_delta=self.huber_loss_delta, temperature=self.temperature, agg_temperature=self.agg_temperature, use_gumbel_for_cells=self.use_gumbel_for_cells, use_gumbel_for_agg=self.use_gumbel_for_agg, average_approximation_function=self.average_approximation_function, cell_selection_preference=self.cell_selection_preference, answer_loss_cutoff=self.answer_loss_cutoff, max_num_rows=self.max_num_rows, max_num_columns=self.max_num_columns, average_logits_per_cell=self.average_logits_per_cell, select_one_column=self.select_one_column, allow_empty_column_selection=self.allow_empty_column_selection, init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero, reset_position_index_per_cell=self.reset_position_index_per_cell, disable_per_token_loss=self.disable_per_token_loss, ) def create_and_check_model( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs.pop("attention_mask") result = model(inputs) inputs.pop("token_type_ids") result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": token_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): config.num_labels = self.num_labels model = TFTapasForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "labels": sequence_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): # inference: without aggregation head (SQA). Model only returns logits sqa_config = copy.copy(config) sqa_config.num_aggregation_labels = 0 sqa_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # inference: with aggregation head (WTQ, WikiSQL-supervised). 
Model returns logits and aggregation logits model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # training: can happen in 3 main ways # case 1: conversational (SQA) model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # case 2: weak supervision for aggregation (WTQ) model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "numeric_values": numeric_values, "numeric_values_scale": numeric_values_scale, "float_answer": float_answer, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # case 3: strong supervision for aggregation (WikiSQL-supervised) wikisql_config = copy.copy(config) wikisql_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=wikisql_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "aggregation_labels": aggregation_labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tensorflow_probability @require_tf class TFTapasModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFTapasModel, TFTapasForMaskedLM, TFTapasForSequenceClassification, TFTapasForQuestionAnswering, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFTapasModel, "fill-mask": TFTapasForMaskedLM, "text-classification": TFTapasForSequenceClassification, "zero-shot": TFTapasForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if 
return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) inputs_dict["aggregation_labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["numeric_values"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["numeric_values_scale"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["float_answer"] = tf.zeros(self.model_tester.batch_size, dtype=tf.float32) elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def setUp(self): self.model_tester = TFTapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_dataset_conversion(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_keras_fit(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_loss_computation(self): pass def prepare_tapas_single_inputs_for_inference(): # Here we prepare a single table-question pair to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], } queries = "Which footballer is 33 years old?" 
table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_inference(): # Here we prepare a batch of 2 table-question pairs to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"] table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_training(): # Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "What's the total number of goals?"] table = pd.DataFrame.from_dict(data) answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]] answer_text = [["Lionel Messi"], ["1462"]] float_answer = [float("NaN"), float("1462")] return table, queries, answer_coordinates, answer_text, float_answer @require_tensorflow_probability @require_tf class TFTapasModelIntegrationTest(unittest.TestCase): @cached_property def default_tokenizer(self): return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") @slow def test_inference_no_head(self): # ideally we want to test this with the weights of tapas_inter_masklm_base_reset, # but since it's not straightforward to do this with the TF 1 implementation, we test it with # the weights of the WTQ base model (i.e. tapas_wtq_wikisql_sqa_inter_masklm_base_reset) model = TFTapasModel.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the sequence output expected_slice = tf.constant( [ [ [-0.141581565, -0.599805772, 0.747186482], [-0.143664181, -0.602008104, 0.749218345], [-0.15169853, -0.603363097, 0.741370678], ] ] ) tf.debugging.assert_near(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005) # test the pooled output expected_slice = tf.constant([[0.987518311, -0.970520139, -0.994303405]]) tf.debugging.assert_near(outputs.pooler_output[:, :3], expected_slice, atol=0.0005) @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass # TapasForQuestionAnswering has 3 possible ways of being fine-tuned: # - conversational set-up (SQA) # - weak supervision for aggregation (WTQ, WikiSQL) # - strong supervision for aggregation (WikiSQL-supervised) # We test all of them: @slow def test_inference_question_answering_head_conversational(self): # note that google/tapas-base-finetuned-sqa should correspond to tapas_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -16.262585, -10004.089, 15.435196, 15.435196, 15.435196, -9990.443, -16.327433, -16.327433, -16.327433, -16.327433, -16.327433, -10004.84, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.015) @slow def 
test_inference_question_answering_head_conversational_absolute_embeddings(self): # note that google/tapas-small-finetuned-sqa should correspond to tapas_sqa_inter_masklm_small_reset # however here we test the version with absolute position embeddings model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -18.369339, -10014.692, 17.730324, 17.730324, 17.730324, -9984.974, -18.322773, -18.322773, -18.322773, -18.322773, -18.322773, -10007.267, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.01) @slow def test_inference_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer # let's test on a batch table, queries = prepare_tapas_batch_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([2, 28]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736], [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677], ] ) tf.debugging.assert_near(logits[:, -6:], expected_slice, atol=0.4) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant( [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]] ) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.001) # test the predicted answer coordinates and aggregation indices EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]] EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1] predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits, outputs.logits_aggregation ) tf.debugging.assert_equal(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates) tf.debugging.assert_equal(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices) @slow def test_training_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer # let's test on a batch table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training() inputs = tokenizer( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding="longest", return_tensors="tf", ) # the answer should be prepared by the user float_answer = tf.constant(float_answer, dtype=tf.float32) outputs = model( 
input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], labels=inputs["labels"], numeric_values=inputs["numeric_values"], numeric_values_scale=inputs["numeric_values_scale"], float_answer=float_answer, ) # test the loss loss = outputs.loss expected_loss = tf.constant(3.3527612686157227e-08) tf.debugging.assert_near(loss, expected_loss, atol=1e-6) # test the logits on the first example logits = outputs.logits expected_shape = tf.TensorShape([2, 29]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ -160.0156, -160.0156, -160.0156, -160.0156, -160.0156, -10072.2266, -10070.8896, -10092.6006, -10092.6006, ] ) tf.debugging.assert_near(logits[0, -9:], expected_slice, atol=1e-6) # test the aggregation logits on the second example logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([-4.0538, 40.0304, -5.3554, 23.3965]) tf.debugging.assert_near(logits_aggregation[1, -4:], expected_tensor, atol=1e-4) @slow def test_inference_question_answering_head_strong_supervision(self): # note that google/tapas-base-finetuned-wikisql-supervised should correspond to tapas_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -18.6185989, -10008.7969, 17.6355762, 17.6355762, 17.6355762, -10002.4404, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -10007.0977, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.02) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([1, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([[16.5659733, -3.06624889, -2.34152961, -0.970244825]]) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.003) @slow def test_inference_classification_head(self): # note that google/tapas-base-finetuned-tabfact should correspond to tapas_tabfact_inter_masklm_base_reset model = TFTapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the classification logits logits = outputs.logits expected_shape = tf.TensorShape([1, 2]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant([[0.795137286, 9.5572]]) tf.debugging.assert_near(logits, expected_slice, atol=0.05) # Below: tests for Tapas utilities which are defined in modeling_tf_tapas.py. # These are based on segmented_tensor_test.py of the original implementation. 
# URL: https://github.com/google-research/tapas/blob/master/tapas/models/segmented_tensor_test.py @require_tensorflow_probability class TFTapasUtilsTest(unittest.TestCase): def _prepare_tables(self): """Prepares two tables, both with three distinct rows. The first table has two columns: 1.0, 2.0 | 3.0 2.0, 0.0 | 1.0 1.0, 3.0 | 4.0 The second table has three columns: 1.0 | 2.0 | 3.0 2.0 | 0.0 | 1.0 1.0 | 3.0 | 4.0 Returns: SegmentedTensors with the tables. """ values = tf.constant( [ [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], ] ) row_index = IndexMap( indices=[ [[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]], ], num_segments=3, batch_dims=1, ) col_index = IndexMap( indices=[ [[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]], ], num_segments=3, batch_dims=1, ) return values, row_index, col_index def test_product_index(self): _, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_index_proj = cell_index.project_outer(cell_index) col_index_proj = cell_index.project_inner(cell_index) ind = cell_index.indices self.assertEqual(cell_index.num_segments, 9) # Projections should give back the original indices. # we use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy()) self.assertEqual(row_index.num_segments, row_index_proj.num_segments) self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy()) self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims) # The first and second "column" are identified in the first table. for i in range(3): self.assertEqual(ind[0, i, 0], ind[0, i, 1]) self.assertNotEqual(ind[0, i, 0], ind[0, i, 2]) # All rows are distinct in the first table. for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 and j != j_2: self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2]) # All cells are distinct in the second table. 
for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 or j != j_2: self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2]) def test_flatten(self): _, row_index, col_index = self._prepare_tables() row_index_flat = flatten(row_index) col_index_flat = flatten(col_index) shape = [3, 4, 5] batched_index = IndexMap(indices=tf.zeros(shape, dtype=tf.int32), num_segments=1, batch_dims=3) batched_index_flat = flatten(batched_index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal( row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5] ) np.testing.assert_array_equal( col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5] ) self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape)) np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape))) def test_range_index_map(self): batch_shape = [3, 4] num_segments = 5 index = range_index_map(batch_shape, num_segments) self.assertEqual(num_segments, index.num_segments) self.assertEqual(2, index.batch_dims) indices = index.indices # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(list(indices.shape), [3, 4, 5]) for i in range(batch_shape[0]): for j in range(batch_shape[1]): # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments)) def test_reduce_sum(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_sum, _ = reduce_sum(values, row_index) col_sum, _ = reduce_sum(values, col_index) cell_sum, _ = reduce_sum(values, cell_index) # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]]) np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]]) np.testing.assert_allclose( cell_sum.numpy(), [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]], ) def test_reduce_mean(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_mean, _ = reduce_mean(values, row_index) col_mean, _ = reduce_mean(values, col_index) cell_mean, _ = reduce_mean(values, cell_index) # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose( row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]] ) np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]]) np.testing.assert_allclose( cell_mean.numpy(), [ [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0], ], ) def test_reduce_max(self): values = tf.convert_to_tensor([2.0, 1.0, 0.0, 3.0]) index = IndexMap(indices=tf.convert_to_tensor([0, 1, 0, 1]), num_segments=2) maximum, _ = reduce_max(values, index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(maximum.numpy(), [2, 3]) def test_reduce_sum_vectorized(self): values = tf.convert_to_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]) index = IndexMap(indices=tf.convert_to_tensor([0, 0, 1]), num_segments=2, batch_dims=0) sums, new_index = reduce_sum(values, index) # We use np.testing.assert_allclose rather than 
Tensorflow's assertAllClose np.testing.assert_allclose(sums.numpy(), [[3.0, 5.0, 7.0], [3.0, 4.0, 5.0]]) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1]) np.testing.assert_array_equal(new_index.num_segments.numpy(), 2) np.testing.assert_array_equal(new_index.batch_dims, 0) def test_gather(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) # Compute sums and then gather. The result should have the same shape as # the original table and each element should contain the sum the values in # its cell. sums, _ = reduce_sum(values, cell_index) cell_sum = gather(sums, cell_index) assert cell_sum.shape == values.shape # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_allclose( cell_sum.numpy(), [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]], ) def test_gather_vectorized(self): values = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) index = IndexMap(indices=tf.convert_to_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1) result = gather(values, index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
transformers/tests/models/tapas/test_modeling_tf_tapas.py/0
{ "file_path": "transformers/tests/models/tapas/test_modeling_tf_tapas.py", "repo_id": "transformers", "token_count": 21105 }
401
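The Tapas utility tests above exercise segmented reductions (reduce_sum, reduce_mean, gather) over IndexMaps. As a plain-NumPy illustration of the operation test_reduce_sum verifies, the sketch below scatter-adds values by segment id; it is a standalone example under that assumption, not the transformers implementation.

import numpy as np

# First table from _prepare_tables and its row segment ids.
values = np.array([[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]])
row_index = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])

# Scatter-add every value into the segment given by its row id.
row_sums = np.zeros(3)
np.add.at(row_sums, row_index.ravel(), values.ravel())
print(row_sums)  # [6. 3. 8.], the per-row totals test_reduce_sum expects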
# coding=utf-8 # Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.image_transforms import PaddingMode from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor class TvpImageProcessingTester(unittest.TestCase): def __init__( self, parent, do_resize: bool = True, size: Dict[str, int] = {"longest_edge": 40}, do_center_crop: bool = False, crop_size: Dict[str, int] = None, do_rescale: bool = False, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: Dict[str, int] = {"height": 80, "width": 80}, fill: int = None, pad_mode: PaddingMode = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], batch_size=2, min_resolution=40, max_resolution=80, num_channels=3, num_frames=2, ): self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size self.fill = fill self.pad_mode = pad_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_frames = num_frames def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "do_center_crop": self.do_center_crop, "do_pad": self.do_pad, "pad_size": self.pad_size, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to TvpImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: return (int(self.pad_size["height"]), int(self.pad_size["width"])) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class TvpImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = TvpImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = TvpImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "pad_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"longest_edge": 40}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 12}) self.assertEqual(image_processor.size, {"longest_edge": 12}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for 
video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
transformers/tests/models/tvp/test_image_processing_tvp.py/0
{ "file_path": "transformers/tests/models/tvp/test_image_processing_tvp.py", "repo_id": "transformers", "token_count": 5453 }
402
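The TVP image-processing tests above always expect 80x80 outputs because, with do_pad enabled, frames are resized to longest_edge=40 and then padded up to pad_size. A hedged usage sketch with the same keyword arguments as the tester's image_processor_dict; the frame sizes and random pixel values are illustrative assumptions.

import numpy as np
from transformers import TvpImageProcessor

processor = TvpImageProcessor(
    do_resize=True,
    size={"longest_edge": 40},
    do_center_crop=False,
    do_pad=True,
    pad_size={"height": 80, "width": 80},
)
# One video of two 48x64 RGB frames (channels last), values in [0, 255].
video = [np.random.randint(0, 256, (48, 64, 3), dtype=np.uint8) for _ in range(2)]
pixel_values = processor(video, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 2, 3, 80, 80]): batch, frames, channels, height, width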
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VitMatte model. """ import unittest from huggingface_hub import hf_hub_download from transformers import VitMatteConfig from transformers.testing_utils import ( require_torch, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import VitDetConfig, VitMatteForImageMatting from transformers.models.vitmatte.modeling_vitmatte import VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import VitMatteImageProcessor class VitMatteModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=16, num_channels=4, is_training=True, use_labels=False, hidden_size=2, num_hidden_layers=2, num_attention_heads=2, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, scope=None, out_features=["stage1"], fusion_hidden_sizes=[128, 64, 32, 16], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_features = out_features self.fusion_hidden_sizes = fusion_hidden_sizes self.seq_length = (self.image_size // self.patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: raise NotImplementedError("Training is not yet supported") config = self.get_config() return config, pixel_values, labels def get_backbone_config(self): return VitDetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_size=self.hidden_size, is_training=self.is_training, hidden_act=self.hidden_act, out_features=self.out_features, ) def get_config(self): return VitMatteConfig( backbone_config=self.get_backbone_config(), backbone=None, hidden_size=self.hidden_size, fusion_hidden_sizes=self.fusion_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = VitMatteForImageMatting(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.alphas.shape, (self.batch_size, 1, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitMatteModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VitMatte does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VitMatteForImageMatting,) if is_torch_available() else () pipeline_model_mapping = {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitMatteModelTester(self) self.config_tester = ConfigTester(self, config_class=VitMatteConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="VitMatte does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ViTMatte does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VitMatteForImageMatting.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="ViTMatte does not support retaining gradient on attention logits") def test_retain_grad_hidden_states_attentions(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [2, 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] config.output_hidden_states = True print("Hello we're here") check_hidden_states_output(inputs_dict, config, model_class) @require_torch class VitMatteModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k") model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k").to(torch_device) filepath = hf_hub_download( repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset" ) image = Image.open(filepath).convert("RGB") filepath = hf_hub_download( repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset" ) trimap = Image.open(filepath).convert("L") # prepare image + trimap for the model inputs = processor(images=image, trimaps=trimap, return_tensors="pt").to(torch_device) with torch.no_grad(): alphas = model(**inputs).alphas expected_shape = torch.Size((1, 1, 640, 960)) self.assertEqual(alphas.shape, expected_shape) expected_slice = torch.tensor( [[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]], device=torch_device ) self.assertTrue(torch.allclose(alphas[0, 0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/vitmatte/test_modeling_vitmatte.py/0
{ "file_path": "transformers/tests/models/vitmatte/test_modeling_vitmatte.py", "repo_id": "transformers", "token_count": 4116 }
403
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
transformers/tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py/0
{ "file_path": "transformers/tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "repo_id": "transformers", "token_count": 850 }
404
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline(self, model, tokenizer, processor): pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, image_processor=processor) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_tf def test_small_model_tf(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) outputs = pipe(image, max_new_tokens=1) self.assertEqual( outputs, [{"generated_text": "growth"}], ) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) 
self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? 
(1) lava (2) core (3) tunnel (4) ash cloud" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "ash cloud"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_tf def test_large_model_tf(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_conditional_generation_llava(self): pipe = pipeline("image-to-text", model="llava-hf/bakLlava-v1-hf") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = ( "<image>\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT:" ) outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200}) self.assertEqual( outputs, [ { "generated_text": "<image> \nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT: Lava" } ], )
transformers/tests/pipelines/test_pipelines_image_to_text.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_to_text.py", "repo_id": "transformers", "token_count": 4393 }
405
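Outside the test harness, the slow checks above correspond to ordinary pipeline usage. A hedged sketch with the same checkpoint and fixture image as test_large_model_pt; the path is relative to the transformers repo root, and the generated caption is whatever the model produces.

from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
outputs = captioner("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(outputs)  # e.g. [{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}]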
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. # model_mapping = {CLIPConfig: CLIPModel} # def get_test_pipeline(self, model, tokenizer, processor): # if tokenizer is None: # # Side effect of no Fast Tokenizer class for these model, so skipping # # But the slow tokenizer test should still run as they're quite small # self.skipTest("No tokenizer available") # return # # return None, None # image_classifier = ZeroShotImageClassificationPipeline( # model=model, tokenizer=tokenizer, feature_extractor=processor # ) # # test with a raw waveform # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # return image_classifier, [image, image2] # def run_pipeline_test(self, pipe, examples): # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # outputs = pipe(image, candidate_labels=["A", "B"]) # self.assertEqual(outputs, {"text": ANY(str)}) # # Batching # outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"]) @require_torch def test_small_model_pt(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. 
[ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @require_tf def test_small_model_tf(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertEqual( nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def test_large_model_tf(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 
0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_torch def test_siglip_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="google/siglip-base-patch16-224", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ] ] * 5, )
transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py", "repo_id": "transformers", "token_count": 5307 }
406
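The same pattern applies to the zero-shot image classification pipeline tested above. A hedged sketch using the CLIP checkpoint, fixture image, and candidate labels from test_large_model_pt; exact scores depend on the model weights.

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
outputs = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
print(outputs)  # roughly [{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}]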
import importlib.util


def is_sagemaker_available():
    return importlib.util.find_spec("sagemaker") is not None
transformers/tests/sagemaker/__init__.py/0
{ "file_path": "transformers/tests/sagemaker/__init__.py", "repo_id": "transformers", "token_count": 36 }
407
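is_sagemaker_available above uses the standard probe-without-import idiom from the standard library. A hedged, generalized sketch of the same idea; the helper name is illustrative and not part of the test suite.

import importlib.util


def is_package_available(package_name: str) -> bool:
    # find_spec only inspects the import machinery; it does not import the package.
    return importlib.util.find_spec(package_name) is not None


print(is_package_available("sagemaker"))  # False unless sagemaker is installed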
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") class FeatureExtractorUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = Wav2Vec2FeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class FeatureExtractorPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-feature-extractor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor") except HTTPError: pass def test_push_to_hub(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) # Reset repo delete_repo(token=self._token, repo_id="test-feature-extractor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): 
self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_in_organization(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("valid_org/test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_dynamic_feature_extractor(self): CustomFeatureExtractor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-dynamic-feature-extractor", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, ) new_feature_extractor = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
transformers/tests/test_feature_extraction_utils.py/0
{ "file_path": "transformers/tests/test_feature_extraction_utils.py", "repo_id": "transformers", "token_count": 2408 }
408
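test_cached_files_are_used_when_internet_is_down above captures a reusable pattern: warm the local cache with one real download, then patch requests so only the cache can satisfy the reload. A hedged, condensed sketch of that pattern with the same tiny checkpoint as the test; the first call still needs network access.

import unittest.mock as mock

from requests.exceptions import HTTPError
from transformers import Wav2Vec2FeatureExtractor

# Warm the cache with one real download.
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

# Fake a Hub outage: every HTTP request now reports a server error.
offline = mock.Mock(status_code=500, headers={}, json=mock.Mock(return_value={}))
offline.raise_for_status.side_effect = HTTPError

with mock.patch("requests.Session.request", return_value=offline):
    extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, loaded from the local cache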
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent.futures import json import os import shutil import tempfile import unittest from transformers import AutoTokenizer, PreTrainedTokenizerFast from transformers.testing_utils import require_tokenizers from ..test_tokenization_common import TokenizerTesterMixin @require_tokenizers class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase): rust_tokenizer_class = PreTrainedTokenizerFast test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_vocab_key = "tokenizer_file" def setUp(self): self.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map super().setUp() self.test_rust_tokenizer = True model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"] self.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe" # Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment) self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths] tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0]) tokenizer.save_pretrained(self.tmpdirname) def test_tokenizer_mismatch_warning(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_encode_decode_with_spaces(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_added_tokens_serialization(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_additional_special_tokens_serialization(self): pass def test_pretrained_model_lists(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_prepare_for_model(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_rust_tokenizer_signature(self): # PreTrainedTokenizerFast doesn't have tokenizer_file in its signature pass def test_training_new_tokenizer(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. 
for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_special_tokens_change(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer_with_special_tokens_change() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_bytelevel(self): tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name) toy_text_iterator = ("a" for _ in range(1000)) new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) encoding_ids = new_tokenizer.encode("a🤗") self.assertEqual(encoding_ids, [64, 172, 253, 97, 245]) def test_init_from_tokenizers_model(self): from tokenizers import Tokenizer sentences = ["Hello, y'all!", "How are you 😁 ? 
There should not be any issue right?"] tokenizer = Tokenizer.from_pretrained("t5-base") # Enable padding tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8) self.assertEqual( tokenizer.padding, { "length": 512, "pad_to_multiple_of": 8, "pad_id": 0, "pad_token": "<pad>", "pad_type_id": 0, "direction": "right", }, ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.pad_token_id, 0) self.assertEqual(tok.padding_side, "right") self.assertEqual(tok.pad_token, "<pad>") self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"} ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.truncation_side, "right") self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first") self.assertEqual(tok.init_kwargs["max_length"], 8) self.assertEqual(tok.init_kwargs["stride"], 0) # NOTE even if the model has a default max_length, it is not used... # thus tok(sentences, truncation = True) does nothing and does not warn either self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip @require_tokenizers class TokenizerVersioningTest(unittest.TestCase): def test_local_versioning(self): tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: # Hack to save this in the tokenizer_config.json tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"] tokenizer.save_pretrained(tmp_dir) json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w")) # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer) + 1) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old tokenizer file as the version of Transformers is < 4.0.0 shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json")) tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"] tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer)) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) def test_repo_versioning(self): # This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower. repo = "hf-internal-testing/test-two-tokenizers" # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 tokenizer = AutoTokenizer.from_pretrained(repo) self.assertEqual(len(tokenizer), 28997) json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers old_transformers.tokenization_utils_base.__version__ = "3.0.0" old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo) self.assertEqual(len(old_tokenizer), 28996) json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) @require_tokenizers class ReduceMutableBorrowTests(unittest.TestCase): def test_async_share_tokenizer(self): # See https://github.com/huggingface/transformers/pull/12550 # and https://github.com/huggingface/tokenizers/issues/537 tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel") text = "The Matrix is a 1999 science fiction action film." with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)] return_value = [future.result() for future in futures] self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)]) def fetch(self, tokenizer, text): return tokenizer.encode(text, truncation="longest_first", padding="longest")
transformers/tests/tokenization/test_tokenization_fast.py/0
{ "file_path": "transformers/tests/tokenization/test_tokenization_fast.py", "repo_id": "transformers", "token_count": 5163 }
409
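train_new_from_iterator, exercised in test_training_new_tokenizer_with_bytelevel above, works on any iterator of strings. A hedged sketch with the same byte-level BPE checkpoint; the toy corpus and vocabulary size are illustrative assumptions.

from transformers import PreTrainedTokenizerFast

old_tokenizer = PreTrainedTokenizerFast.from_pretrained("SaulLu/dummy-tokenizer-bytelevel-bpe")
toy_corpus = (text for text in ["hello tokenizers", "hello transformers"] * 500)
new_tokenizer = old_tokenizer.train_new_from_iterator(toy_corpus, vocab_size=50, length=1000)
print(new_tokenizer.encode("hello"))  # ids from the freshly trained vocabulary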
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specified directory, looking for the files identified with `identifier`, and executes
        the doctests in those files.

        Args:
            directory (`Path`): Directory containing the files.
            identifier (`str`): Will only parse files containing this substring.
            ignore_files (`List[str]`): List of files to skip.
            n_identifier (`str` or `List[str]`): Will not parse files containing this/these identifiers.
            only_modules (`bool`): Whether to only analyze modules.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
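# Editor's note: a small, self-contained sketch of the two doctest entry points that
# `analyze_directory` relies on, kept as comments because it is not part of the test file.
# The module and documentation file chosen below are illustrative, not paths taken from the
# tests above:
#
#     import doctest
#     import unittest
#
#     import transformers
#
#     # `only_modules=True` path: run the doctests embedded in a module's docstrings.
#     suite = doctest.DocTestSuite(transformers.tokenization_utils_base)
#     result = unittest.TextTestRunner().run(suite)
#     assert len(result.failures) == 0
#
#     # `only_modules=False` path: run the doctests found in a documentation file.
#     file_result = doctest.testfile("docs/source/en/index.md", module_relative=False, optionflags=doctest.ELLIPSIS)
#     assert file_result.failed == 0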
transformers/tests/utils/test_doc_samples.py/0
{ "file_path": "transformers/tests/utils/test_doc_samples.py", "repo_id": "transformers", "token_count": 1747 }
410
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A script to add and/or update the attribute `pipeline_model_mapping` in model test files.

This script will be (mostly) used in the following 2 situations:

  - run within a (scheduled) CI job to:
    - check if model test files in the library have updated `pipeline_model_mapping`,
    - and/or update test files and (possibly) open a GitHub pull request automatically
  - being run by a `transformers` member to quickly check and update some particular test file(s)

This script is **NOT** intended to be run (manually) by community contributors.
"""


import argparse
import glob
import inspect
import os
import re
import unittest

from get_test_info import get_test_classes

from tests.test_pipeline_mixin import pipeline_test_mapping


PIPELINE_TEST_MAPPING = {}
for task, _ in pipeline_test_mapping.items():
    PIPELINE_TEST_MAPPING[task] = {"pt": None, "tf": None}


# DO **NOT** add item to this set (unless the reason is approved)
TEST_FILE_TO_IGNORE = {
    "tests/models/esm/test_modeling_esmfold.py",  # The pipeline test mapping is added to `test_modeling_esm.py`
}


def get_framework(test_class):
    """Infer the framework from the test class `test_class`."""
    if "ModelTesterMixin" in [x.__name__ for x in test_class.__bases__]:
        return "pt"
    elif "TFModelTesterMixin" in [x.__name__ for x in test_class.__bases__]:
        return "tf"
    elif "FlaxModelTesterMixin" in [x.__name__ for x in test_class.__bases__]:
        return "flax"
    else:
        return None


def get_mapping_for_task(task, framework):
    """Get mappings defined in `XXXPipelineTests` for the task `task`."""
    # Use the cached results
    if PIPELINE_TEST_MAPPING[task].get(framework, None) is not None:
        return PIPELINE_TEST_MAPPING[task][framework]

    pipeline_test_class = pipeline_test_mapping[task]["test"]
    mapping = None

    if framework == "pt":
        mapping = getattr(pipeline_test_class, "model_mapping", None)
    elif framework == "tf":
        mapping = getattr(pipeline_test_class, "tf_model_mapping", None)

    if mapping is not None:
        mapping = dict(mapping.items())

    # cache the results
    PIPELINE_TEST_MAPPING[task][framework] = mapping

    return mapping


def get_model_for_pipeline_test(test_class, task):
    """Get the model architecture(s) related to the test class `test_class` for a pipeline `task`."""
    framework = get_framework(test_class)
    if framework is None:
        return None
    mapping = get_mapping_for_task(task, framework)
    if mapping is None:
        return None

    config_classes = list({model_class.config_class for model_class in test_class.all_model_classes})
    if len(config_classes) != 1:
        raise ValueError("There should be exactly one configuration class from `test_class.all_model_classes`.")

    # This could be a list/tuple of model classes, but it's rare.
    model_class = mapping.get(config_classes[0], None)
    if isinstance(model_class, (tuple, list)):
        model_class = sorted(model_class, key=lambda x: x.__name__)

    return model_class


def get_pipeline_model_mapping(test_class):
    """Get `pipeline_model_mapping` for `test_class`."""
    mapping = [(task, get_model_for_pipeline_test(test_class, task)) for task in pipeline_test_mapping]
    mapping = sorted([(task, model) for task, model in mapping if model is not None], key=lambda x: x[0])

    return dict(mapping)


def get_pipeline_model_mapping_string(test_class):
    """Get `pipeline_model_mapping` for `test_class` as a string (to be added to the test file).

    This will be a 1-line string. After this is added to a test file, `make style` will format it beautifully.
    """
    framework = get_framework(test_class)
    if framework == "pt":
        framework = "torch"
    default_value = "{}"

    mapping = get_pipeline_model_mapping(test_class)
    if len(mapping) == 0:
        return ""

    texts = []
    for task, model_classes in mapping.items():
        if isinstance(model_classes, (tuple, list)):
            # A list/tuple of model classes
            value = "(" + ", ".join([x.__name__ for x in model_classes]) + ")"
        else:
            # A single model class
            value = model_classes.__name__
        texts.append(f'"{task}": {value}')
    text = "{" + ", ".join(texts) + "}"
    text = f"pipeline_model_mapping = {text} if is_{framework}_available() else {default_value}"

    return text


def is_valid_test_class(test_class):
    """Restrict to `XXXModelTesterMixin` subclasses that are also subclasses of `unittest.TestCase`."""
    base_class_names = {"ModelTesterMixin", "TFModelTesterMixin", "FlaxModelTesterMixin"}
    if not issubclass(test_class, unittest.TestCase):
        return False
    return len(base_class_names.intersection([x.__name__ for x in test_class.__bases__])) > 0


def find_test_class(test_file):
    """Find a test class in `test_file` to which we will add `pipeline_model_mapping`."""
    test_classes = [x for x in get_test_classes(test_file) if is_valid_test_class(x)]

    target_test_class = None
    for test_class in test_classes:
        # If a test class has defined `pipeline_model_mapping`, let's take it
        if getattr(test_class, "pipeline_model_mapping", None) is not None:
            target_test_class = test_class
            break
    # Take the test class with the shortest name (just a heuristic)
    if target_test_class is None and len(test_classes) > 0:
        target_test_class = sorted(test_classes, key=lambda x: (len(x.__name__), x.__name__))[0]

    return target_test_class


def find_block_ending(lines, start_idx, indent_level):
    end_idx = start_idx
    for idx, line in enumerate(lines[start_idx:]):
        indent = len(line) - len(line.lstrip())
        if idx == 0 or indent > indent_level or (indent == indent_level and line.strip() == ")"):
            end_idx = start_idx + idx
        elif idx > 0 and indent <= indent_level:
            # Outside the definition block of `pipeline_model_mapping`
            break

    return end_idx


def add_pipeline_model_mapping(test_class, overwrite=False):
    """Add `pipeline_model_mapping` to `test_class`."""
    if getattr(test_class, "pipeline_model_mapping", None) is not None:
        if not overwrite:
            return "", -1

    line_to_add = get_pipeline_model_mapping_string(test_class)
    if len(line_to_add) == 0:
        return "", -1
    line_to_add = line_to_add + "\n"

    # The code defining the class `test_class`
    class_lines, class_start_line_no = inspect.getsourcelines(test_class)
    # `inspect` gives the code for an object, including decorator(s) if any.
    # We (only) need the exact line of the class definition.
    for idx, line in enumerate(class_lines):
        if line.lstrip().startswith("class "):
            class_lines = class_lines[idx:]
            class_start_line_no += idx
            break
    class_end_line_no = class_start_line_no + len(class_lines) - 1

    # The index in `class_lines` that starts the definition of `all_model_classes`, `all_generative_model_classes` or
    # `pipeline_model_mapping`. This assumes they are defined in such order, and we take the start index of the last
    # block that appears in a `test_class`.
    start_idx = None
    # The indent level of the line at `class_lines[start_idx]` (if defined)
    indent_level = 0
    # To record if `pipeline_model_mapping` is found in `test_class`.
    def_line = None
    for idx, line in enumerate(class_lines):
        if line.strip().startswith("all_model_classes = "):
            indent_level = len(line) - len(line.lstrip())
            start_idx = idx
        elif line.strip().startswith("all_generative_model_classes = "):
            indent_level = len(line) - len(line.lstrip())
            start_idx = idx
        elif line.strip().startswith("pipeline_model_mapping = "):
            indent_level = len(line) - len(line.lstrip())
            start_idx = idx
            def_line = line
            break

    if start_idx is None:
        return "", -1
    # Find the ending index (inclusive) of the above found block.
    end_idx = find_block_ending(class_lines, start_idx, indent_level)

    # Extract `is_xxx_available()` from existing blocks: some models require specific libraries like `timm` and use
    # `is_timm_available()` instead of `is_torch_available()`.
    # Keep leading and trailing whitespaces
    r = re.compile(r"\s(is_\S+?_available\(\))\s")
    for line in class_lines[start_idx : end_idx + 1]:
        backend_condition = r.search(line)
        if backend_condition is not None:
            # Replace the leading and trailing whitespace with a single space character " ".
            target = " " + backend_condition[0][1:-1] + " "
            line_to_add = r.sub(target, line_to_add)
            break

    if def_line is None:
        # `pipeline_model_mapping` is not defined. The target index is set to the ending index (inclusive) of
        # `all_model_classes` or `all_generative_model_classes`.
        target_idx = end_idx
    else:
        # `pipeline_model_mapping` is defined. The target index is set to be one **BEFORE** its start index.
        target_idx = start_idx - 1

        # mark the lines of the currently existing `pipeline_model_mapping` to be removed.
        for idx in range(start_idx, end_idx + 1):
            # These lines are going to be removed before writing to the test file.
            class_lines[idx] = None  # noqa

    # Make sure the test class is a subclass of `PipelineTesterMixin`.
    parent_classes = [x.__name__ for x in test_class.__bases__]
    if "PipelineTesterMixin" not in parent_classes:
        # Put `PipelineTesterMixin` just before `unittest.TestCase`
        _parent_classes = [x for x in parent_classes if x != "TestCase"] + ["PipelineTesterMixin"]
        if "TestCase" in parent_classes:
            # Here we **assume** the original string is always with `unittest.TestCase`.
            _parent_classes.append("unittest.TestCase")
        parent_classes = ", ".join(_parent_classes)
        for idx, line in enumerate(class_lines):
            # Find the ending of the declaration of `test_class`
            if line.strip().endswith("):"):
                # mark the lines of the declaration of `test_class` to be removed
                for _idx in range(idx + 1):
                    class_lines[_idx] = None  # noqa
                break
        # Add the new, one-line, class declaration for `test_class`
        class_lines[0] = f"class {test_class.__name__}({parent_classes}):\n"

    # Add indentation
    line_to_add = " " * indent_level + line_to_add
    # Insert `pipeline_model_mapping` to `class_lines`.
    # (The line at `target_idx` should be kept by definition!)
    class_lines = class_lines[: target_idx + 1] + [line_to_add] + class_lines[target_idx + 1 :]

    # Remove the lines that are marked to be removed
    class_lines = [x for x in class_lines if x is not None]

    # Move from test class to module (in order to write to the test file)
    module_lines = inspect.getsourcelines(inspect.getmodule(test_class))[0]
    # Be careful with the 1-off between line numbers and array indices
    module_lines = module_lines[: class_start_line_no - 1] + class_lines + module_lines[class_end_line_no:]
    code = "".join(module_lines)

    module_file = inspect.getsourcefile(test_class)
    with open(module_file, "w", encoding="UTF-8", newline="\n") as fp:
        fp.write(code)

    return line_to_add


def add_pipeline_model_mapping_to_test_file(test_file, overwrite=False):
    """Add `pipeline_model_mapping` to `test_file`."""
    test_class = find_test_class(test_file)
    if test_class:
        add_pipeline_model_mapping(test_class, overwrite=overwrite)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--test_file", type=str, help="A path to the test file, starting with the repository's `tests` directory."
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help="Whether to check and modify all test files.",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Whether to overwrite a test class if it has already defined `pipeline_model_mapping`.",
    )
    args = parser.parse_args()

    if not args.all and not args.test_file:
        raise ValueError("Please specify either `--test_file` or pass `--all` to check/modify all test files.")
    elif args.all and args.test_file:
        raise ValueError("Only one of `--test_file` and `--all` can be specified.")

    test_files = []
    if args.test_file:
        test_files = [args.test_file]
    else:
        pattern = os.path.join("tests", "models", "**", "test_modeling_*.py")
        for test_file in glob.glob(pattern):
            # `Flax` is not concerned at this moment. Check the basename, as `test_file` includes the
            # `tests/models/...` prefix.
            if not os.path.basename(test_file).startswith("test_modeling_flax_"):
                test_files.append(test_file)

    for test_file in test_files:
        if test_file in TEST_FILE_TO_IGNORE:
            print(f"[SKIPPED] {test_file} is skipped as it is in `TEST_FILE_TO_IGNORE` in the file {__file__}.")
            continue
        add_pipeline_model_mapping_to_test_file(test_file, overwrite=args.overwrite)
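# Editor's note: a hedged usage sketch for the script above, kept as comments because it is not
# part of the source file. The test file paths are illustrative; the flags (`--test_file`,
# `--all`, `--overwrite`) are the ones defined in the argparse block:
#
#     # Check/update a single model test file:
#     python utils/add_pipeline_model_mapping_to_test.py \
#         --test_file tests/models/bert/test_modeling_bert.py --overwrite
#
#     # Check/update every (non-Flax) model test file:
#     python utils/add_pipeline_model_mapping_to_test.py --all
#
# The one-line attribute the script inserts into a test class looks roughly like the following
# (the exact mapping depends on the pipeline test mappings at run time):
#
#     pipeline_model_mapping = {"feature-extraction": BertModel, "fill-mask": BertForMaskedLM} if is_torch_available() else {}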
transformers/utils/add_pipeline_model_mapping_to_test.py/0
{ "file_path": "transformers/utils/add_pipeline_model_mapping_to_test.py", "repo_id": "transformers", "token_count": 5411 }
411