Dataset columns:
  text: string, lengths 7 to 328k
  id: string, lengths 14 to 166
  metadata: dict
  __index_level_0__: int64, range 0 to 459
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audio/Text processor class for CLAP """ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class ClapProcessor(ProcessorMixin): r""" Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBerta tokenizer into a single processor. [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information. Args: feature_extractor ([`ClapFeatureExtractor`]): The audio processor is a required input. tokenizer ([`RobertaTokenizerFast`]): The tokenizer is a required input. """ feature_extractor_class = "ClapFeatureExtractor" tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text` and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwrags` arguments to ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`. 
""" sampling_rate = kwargs.pop("sampling_rate", None) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if audios is not None: audio_features = self.feature_extractor( audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs ) if text is not None and audios is not None: encoding["input_features"] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
transformers/src/transformers/models/clap/processing_clap.py/0
{ "file_path": "transformers/src/transformers/models/clap/processing_clap.py", "repo_id": "transformers", "token_count": 2177 }
309
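The row above defines `ClapProcessor.__call__`, which routes `text` to the RoBERTa tokenizer and `audios` (plus `sampling_rate`) to `ClapFeatureExtractor`, merging the two outputs when both are given. A minimal usage sketch follows; the checkpoint name is an assumption (any CLAP checkpoint on the Hub should behave the same), and the silent waveform only stands in for real audio.

```python
import numpy as np
from transformers import ClapProcessor

# Assumed checkpoint name; substitute any CLAP checkpoint.
processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

# One second of 48 kHz mono "audio" as a stand-in for a real recording.
audio = np.zeros(48_000, dtype=np.float32)

inputs = processor(
    text=["a dog barking", "rain on a window"],
    audios=audio,
    sampling_rate=48_000,
    return_tensors="pt",
)

# Text goes through the tokenizer, audio through the feature extractor; with both
# present the audio features are attached under `input_features` (see __call__ above).
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_features', 'input_ids']
```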
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for CLIPSeg """ import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class CLIPSegProcessor(ProcessorMixin): r""" Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor. [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information. Args: image_processor ([`ViTImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "ViTImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. 
visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images.") if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if visual_prompt is not None: prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if visual_prompt is not None and images is not None: encoding = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: encoding = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
transformers/src/transformers/models/clipseg/processing_clipseg.py/0
{ "file_path": "transformers/src/transformers/models/clipseg/processing_clipseg.py", "repo_id": "transformers", "token_count": 3092 }
310
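`CLIPSegProcessor.__call__` above branches on which of `text`, `images`, and `visual_prompt` are provided. The sketch below exercises the two common combinations; the checkpoint name and the blank placeholder images are assumptions, not taken from the file itself.

```python
from PIL import Image
from transformers import CLIPSegProcessor

# Assumed checkpoint name; any CLIPSeg checkpoint should work the same way.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (352, 352))         # placeholder target image
prompt_image = Image.new("RGB", (352, 352))  # placeholder visual prompt

# Text prompts + image -> input_ids, attention_mask and pixel_values in one BatchEncoding.
text_inputs = processor(text=["a cat", "a remote"], images=image, return_tensors="pt")
print(sorted(text_inputs.keys()))

# Visual prompt + image -> a plain dict with pixel_values and conditional_pixel_values,
# matching the branch in __call__ that handles both being set.
visual_inputs = processor(visual_prompt=prompt_image, images=image, return_tensors="pt")
print(sorted(visual_inputs.keys()))
```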
# coding=utf-8 # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "Salesforce/codegen-350M-mono": 2048, } class CodeGenTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import CodeGenTokenizerFast >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono") >>> tokenizer("Hello world")["input_ids"] [15496, 995] >>> tokenizer(" Hello world")["input_ids"] [18435, 995] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. merges_file (`str`, *optional*): Path to the merges file. tokenizer_file (`str`, *optional*): Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (CodeGen tokenizer detect beginning of words by the preceding space). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = CodeGenTokenizer def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ): super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, ) if kwargs.pop("add_bos_token", False): model_id = kwargs.pop("name_or_path", "") raise ValueError( "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. " "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n" f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." " so that the fast tokenizer works correctly." ) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def decode( self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. 
skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). truncate_before_pattern (`List[str]`, *optional*, defaults to `None`): A list of regular expression strings that will be used to truncate the returned string. This can be used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. """ decoded_text = super().decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) if truncate_before_pattern is not None and len(truncate_before_pattern) > 0: decoded_text = self.truncate(decoded_text, truncate_before_pattern) return decoded_text def truncate(self, completion, truncate_before_pattern): def find_re(string, pattern, start_pos): m = pattern.search(string, start_pos) return m.start() if m else -1 terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern] prints = list(re.finditer("^print", completion, re.MULTILINE)) if len(prints) > 1: completion = completion[: prints[1].start()] defs = list(re.finditer("^def", completion, re.MULTILINE)) if len(defs) > 1: completion = completion[: defs[1].start()] start_pos = 0 terminals_pos = [ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1 ] if len(terminals_pos) > 0: return completion[: min(terminals_pos)] else: return completion
transformers/src/transformers/models/codegen/tokenization_codegen_fast.py/0
{ "file_path": "transformers/src/transformers/models/codegen/tokenization_codegen_fast.py", "repo_id": "transformers", "token_count": 4249 }
311
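The `decode`/`truncate` pair above is what lets CodeGen completions be cut off at the first top-level comment, docstring, or blank-line run. A small sketch, using the `Salesforce/codegen-350M-mono` checkpoint named in the file and the example pattern list from the docstring:

```python
import re
from transformers import CodeGenTokenizerFast

tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")

# A fake "completion" with trailing junk after three newlines and a top-level comment.
completion = 'def hello():\n    print("hello")\n\n\n# unrelated trailing text'
ids = tokenizer(completion)["input_ids"]

decoded = tokenizer.decode(
    ids,
    truncate_before_pattern=["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"],
)
print(decoded)  # everything from the blank-line run onwards is dropped
```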
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"] _import_structure["image_processing_deit"] = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_deit"] = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_deit"] = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/deit/__init__.py/0
{ "file_path": "transformers/src/transformers/models/deit/__init__.py", "repo_id": "transformers", "token_count": 1438 }
312
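This `__init__` wires DeiT into the lazy-import machinery: the heavy modules are only materialized on attribute access, and each backend (vision, torch, TF) is gated by an availability check. A sketch of what that means for a user, assuming the torch and vision extras are installed; the class names come from the import structure above.

```python
from transformers.utils import is_torch_available, is_vision_available

if is_torch_available() and is_vision_available():
    # Resolved lazily through the _LazyModule set up above.
    from transformers import DeiTConfig, DeiTForImageClassification, DeiTImageProcessor

    config = DeiTConfig()
    model = DeiTForImageClassification(config)  # randomly initialised weights
    processor = DeiTImageProcessor()
    print(type(model).__name__, config.model_type)
else:
    # Without the optional backends the corresponding symbols are simply not exported.
    print("torch and/or vision not installed; DeiT model classes are unavailable")
```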
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MMBT configuration""" from ....utils import logging logger = logging.get_logger(__name__) class MMBTConfig(object): """ This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT model according to the specified arguments, defining the model architecture. Args: config ([`PreTrainedConfig`]): Config of the underlying Transformer models. Its values are copied over to use a single config. num_labels (`int`, *optional*): Size of final Linear layer for classification. modal_hidden_size (`int`, *optional*, defaults to 2048): Embedding dimension of the non-text modality encoder. """ def __init__(self, config, num_labels=None, modal_hidden_size=2048): self.__dict__ = config.__dict__ self.modal_hidden_size = modal_hidden_size if num_labels: self.num_labels = num_labels
transformers/src/transformers/models/deprecated/mmbt/configuration_mmbt.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/mmbt/configuration_mmbt.py", "repo_id": "transformers", "token_count": 531 }
313
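`MMBTConfig` does not subclass `PretrainedConfig`; it adopts the wrapped text model's `__dict__` and layers the multimodal fields on top. A sketch of that behaviour follows; the import path is inferred from the `file_path` above (the model lives under `models/deprecated`), so treat it as an assumption.

```python
from transformers import BertConfig
from transformers.models.deprecated.mmbt.configuration_mmbt import MMBTConfig

text_config = BertConfig()  # config of the underlying text Transformer
mmbt_config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)

# All BERT attributes are visible on the MMBT config...
print(mmbt_config.hidden_size)  # 768, inherited from BertConfig
# ...plus the modality-specific additions.
print(mmbt_config.modal_hidden_size, mmbt_config.num_labels)  # 2048 2

# Note: because __dict__ is shared by reference, the extra fields also show up on text_config.
print(text_config.modal_hidden_size)  # 2048
```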
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dilated Neighborhood Attention Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json", # See all Dinat models at https://huggingface.co/models?filter=dinat } class DinatConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Dinat [shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 64): Dimensionality of patch embedding. depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 5]`): Number of layers in each level of the encoder. num_heads (`List[int]`, *optional*, defaults to `[2, 4, 8, 16]`): Number of attention heads in each layer of the Transformer encoder. kernel_size (`int`, *optional*, defaults to 7): Neighborhood Attention kernel size. dilations (`List[List[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`): Dilation value of each NA layer in the Transformer encoder. mlp_ratio (`float`, *optional*, defaults to 3.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 0.0): The initial value for the layer scale. Disabled if <=0. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import DinatConfig, DinatModel >>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration >>> configuration = DinatConfig() >>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration >>> model = DinatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "dinat" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.kernel_size = kernel_size self.dilations = dilations self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.layer_scale_init_value = layer_scale_init_value self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
transformers/src/transformers/models/dinat/configuration_dinat.py/0
{ "file_path": "transformers/src/transformers/models/dinat/configuration_dinat.py", "repo_id": "transformers", "token_count": 2945 }
314
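Beyond the docstring example, the `BackboneConfigMixin` fields are the part of this config that is easiest to get wrong, so here is a short sketch of how `out_features`, `out_indices`, and the derived `hidden_size` resolve with the default `embed_dim` and `depths` shown above.

```python
from transformers import DinatConfig

config = DinatConfig(out_features=["stage2", "stage4"])

print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices aligned to stage_names, i.e. 2 and 4
print(config.hidden_size)   # 512 = embed_dim * 2 ** (len(depths) - 1) = 64 * 8
```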
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Donut Swin Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class DonutSwinConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Donut [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 7): Size of windows. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import DonutSwinConfig, DonutSwinModel >>> # Initializing a Donut naver-clova-ix/donut-base style configuration >>> configuration = DonutSwinConfig() >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration >>> model = DonutSwinModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "donut-swin" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
transformers/src/transformers/models/donut/configuration_donut_swin.py/0
{ "file_path": "transformers/src/transformers/models/donut/configuration_donut_swin.py", "repo_id": "transformers", "token_count": 2322 }
315
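As the comment in `__init__` notes, `hidden_size` is derived rather than passed in, which is what lets DonutSwin act as the encoder of a `VisionEncoderDecoderModel`. A quick sketch of the derived attributes under the defaults above:

```python
from transformers import DonutSwinConfig

config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])

# Channel dimension after the last stage: embed_dim * 2 ** (len(depths) - 1)
print(config.hidden_size)  # 768
print(config.num_layers)   # 4, inferred from len(depths)
```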
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DPT 3.1 checkpoints from the MiDaS repository. URL: https://github.com/isl-org/MiDaS""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import BeitConfig, DPTConfig, DPTForDepthEstimation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(model_name): hidden_size = 768 num_hidden_layers = 12 num_attention_heads = 12 intermediate_size = 3072 out_features = ["stage3", "stage6", "stage9", "stage12"] # beit-base-384 uses [2, 5, 8, 11] if "large" in model_name: hidden_size = 1024 num_hidden_layers = 24 num_attention_heads = 16 intermediate_size = 4096 out_features = ["stage6", "stage12", "stage18", "stage24"] # beit-large-512 uses [5, 11, 17, 23] if "512" in model_name: image_size = 512 elif "384" in model_name: image_size = 384 else: raise ValueError("Model not supported") backbone_config = BeitConfig( image_size=image_size, num_hidden_layers=num_hidden_layers, hidden_size=hidden_size, intermediate_size=intermediate_size, num_attention_heads=num_attention_heads, use_relative_position_bias=True, reshape_hidden_states=False, out_features=out_features, ) neck_hidden_sizes = [256, 512, 1024, 1024] if "large" in model_name else [96, 192, 384, 768] config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes) return config, image_size # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("pretrained.model.cls_token", "backbone.embeddings.cls_token")) rename_keys.append(("pretrained.model.patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("pretrained.model.patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias")) # Transfomer encoder for i in range(config.backbone_config.num_hidden_layers): rename_keys.append((f"pretrained.model.blocks.{i}.gamma_1", f"backbone.encoder.layer.{i}.lambda_1")) rename_keys.append((f"pretrained.model.blocks.{i}.gamma_2", f"backbone.encoder.layer.{i}.lambda_2")) rename_keys.append((f"pretrained.model.blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"pretrained.model.blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"pretrained.model.blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"pretrained.model.blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.intermediate.dense.bias")) 
rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"pretrained.model.blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.output.dense.bias")) rename_keys.append((f"pretrained.model.blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"pretrained.model.blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"pretrained.model.blocks.{i}.attn.relative_position_bias_table", f"backbone.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table")) rename_keys.append((f"pretrained.model.blocks.{i}.attn.relative_position_index", f"backbone.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index")) # activation postprocessing (readout projections + resize blocks) for i in range(4): rename_keys.append((f"pretrained.act_postprocess{i+1}.0.project.0.weight", f"neck.reassemble_stage.readout_projects.{i}.0.weight")) rename_keys.append((f"pretrained.act_postprocess{i+1}.0.project.0.bias", f"neck.reassemble_stage.readout_projects.{i}.0.bias")) rename_keys.append((f"pretrained.act_postprocess{i+1}.3.weight", f"neck.reassemble_stage.layers.{i}.projection.weight")) rename_keys.append((f"pretrained.act_postprocess{i+1}.3.bias", f"neck.reassemble_stage.layers.{i}.projection.bias")) if i != 2: rename_keys.append((f"pretrained.act_postprocess{i+1}.4.weight", f"neck.reassemble_stage.layers.{i}.resize.weight")) rename_keys.append((f"pretrained.act_postprocess{i+1}.4.bias", f"neck.reassemble_stage.layers.{i}.resize.bias")) # refinenet (tricky here) mapping = {1:3, 2:2, 3:1, 4:0} for i in range(1, 5): j = mapping[i] rename_keys.append((f"scratch.refinenet{i}.out_conv.weight", f"neck.fusion_stage.layers.{j}.projection.weight")) rename_keys.append((f"scratch.refinenet{i}.out_conv.bias", f"neck.fusion_stage.layers.{j}.projection.bias")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit1.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight")) rename_keys.append((f"scratch.refinenet{i}.resConfUnit2.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias")) # scratch convolutions for i in range(4): rename_keys.append((f"scratch.layer{i+1}_rn.weight", f"neck.convs.{i}.weight")) # head for i in range(0, 5, 2): rename_keys.append((f"scratch.output_conv.{i}.weight", f"head.head.{i}.weight")) rename_keys.append((f"scratch.output_conv.{i}.bias", f"head.head.{i}.bias")) return rename_keys def remove_ignore_keys_(state_dict): ignore_keys = ["pretrained.model.head.weight", 
"pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(k, None) # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): hidden_size = config.backbone_config.hidden_size for i in range(config.backbone_config.num_hidden_layers): # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"pretrained.model.blocks.{i}.attn.qkv.weight") q_bias = state_dict.pop(f"pretrained.model.blocks.{i}.attn.q_bias") v_bias = state_dict.pop(f"pretrained.model.blocks.{i}.attn.v_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :] state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = q_bias state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ hidden_size : hidden_size * 2, : ] state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :] state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = v_bias def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our DPT structure. """ name_to_url = { "dpt-beit-large-512": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt", "dpt-beit-large-384": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt", "dpt-beit-base-384": "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt", } # define DPT configuration based on URL checkpoint_url = name_to_url[model_name] config, image_size = get_dpt_config(model_name) # load original state_dict from URL state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") # remove certain keys remove_ignore_keys_(state_dict) # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) # read in qkv matrices read_in_q_k_v(state_dict, config) # load HuggingFace model model = DPTForDepthEstimation(config) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) assert missing_keys == [] # assert unexpected_keys == ["pretrained.model.fc_norm.weight", "pretrained.model.fc_norm.bias"] model.eval() # Check outputs on an image # We set `keep_aspect_ratio=False` as our current BEiT does not support arbitrary window sizes processor = DPTImageProcessor( size={"height": image_size, "width": image_size}, keep_aspect_ratio=False, ensure_multiple_of=32 ) image = prepare_img() pixel_values = processor(image, return_tensors="pt").pixel_values print("First values of pixel values:", pixel_values[0, 0, :3, :3]) print("Mean of pixel values:", pixel_values.mean().item()) print("Shape of pixel values:", pixel_values.shape) import requests from PIL import Image from torchvision import transforms url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) transforms = transforms.Compose( [ 
transforms.Resize((image_size, image_size)), transforms.ToTensor(), ] ) pixel_values = transforms(image).unsqueeze(0) # forward pass with torch.no_grad(): outputs = model(pixel_values) predicted_depth = outputs.predicted_depth print("Shape of predicted depth:", predicted_depth.shape) print("First values of predicted depth:", predicted_depth[0, :3, :3]) # assert logits # TODO there's still a small difference with the original logits if model_name == "dpt-beit-large-512": # OK, checked expected_shape = torch.Size([1, 512, 512]) expected_slice = torch.tensor( [[2804.6260, 2792.5708, 2812.9263], [2772.0288, 2780.1118, 2796.2529], [2748.1094, 2766.6558, 2766.9834]] ) elif model_name == "dpt-beit-large-384": # OK, checked expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor( [[1783.2273, 1780.5729, 1792.6453], [1759.9817, 1765.5359, 1778.5002], [1739.1633, 1754.7903, 1757.1990]], ) elif model_name == "dpt-beit-base-384": # OK, checked expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor( [[2898.4482, 2891.3750, 2904.8079], [2858.6685, 2877.2615, 2894.4507], [2842.1235, 2854.1023, 2861.6328]], ) assert predicted_depth.shape == torch.Size(expected_shape) assert torch.allclose(predicted_depth[0, :3, :3], expected_slice) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing model and processor to hub...") model.push_to_hub(repo_id=f"nielsr/{model_name}") processor.push_to_hub(repo_id=f"nielsr/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dpt-beit-large-512", type=str, choices=["dpt-beit-large-512", "dpt-beit-large-384", "dpt-beit-base-384"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub after conversion.", ) args = parser.parse_args() convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/dpt/convert_dpt_beit_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/dpt/convert_dpt_beit_to_hf.py", "repo_id": "transformers", "token_count": 5894 }
316
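The script above is normally run from the command line with the argparse flags defined at the bottom, but the same entry point can be called from Python. The sketch below assumes the conversion module is importable from the installed package (it sits next to the modeling code per the `file_path` above); the output directory is a placeholder, and the call downloads the original MiDaS checkpoint, so it is not lightweight.

```python
# Roughly equivalent to:
#   python convert_dpt_beit_to_hf.py --model_name dpt-beit-base-384 \
#       --pytorch_dump_folder_path ./dpt-beit-base-384
from transformers.models.dpt.convert_dpt_beit_to_hf import convert_dpt_checkpoint

convert_dpt_checkpoint(
    model_name="dpt-beit-base-384",                  # one of the three supported names
    pytorch_dump_folder_path="./dpt-beit-base-384",  # placeholder output directory
    push_to_hub=False,
)
```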
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class EncoderDecoderConfig(PretrainedConfig): r""" [`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: kwargs (*optional*): Dictionary of keyword arguments. Notably: - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Examples: ```python >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel >>> # Initializing a BERT google-bert/bert-base-uncased style configuration >>> config_encoder = BertConfig() >>> config_decoder = BertConfig() >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations >>> model = EncoderDecoderModel(config=config) >>> # Accessing the model configuration >>> config_encoder = model.config.encoder >>> config_decoder = model.config.decoder >>> # set decoder config to causal lm >>> config_decoder.is_decoder = True >>> config_decoder.add_cross_attention = True >>> # Saving the model, including its configuration >>> model.save_pretrained("my-model") >>> # loading model and config from pretrained folder >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model") >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config) ```""" model_type = "encoder-decoder" is_composition = True def __init__(self, **kwargs): super().__init__(**kwargs) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" encoder_config = kwargs.pop("encoder") encoder_model_type = encoder_config.pop("model_type") decoder_config = kwargs.pop("decoder") decoder_model_type = decoder_config.pop("model_type") from ..auto.configuration_auto import AutoConfig self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config) self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config) self.is_encoder_decoder = True @classmethod def from_encoder_decoder_configs( cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs ) -> PretrainedConfig: r""" Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and decoder 
model configuration. Returns: [`EncoderDecoderConfig`]: An instance of a configuration object """ logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config") decoder_config.is_decoder = True decoder_config.add_cross_attention = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
transformers/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py/0
{ "file_path": "transformers/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py", "repo_id": "transformers", "token_count": 1497 }
317
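`from_encoder_decoder_configs` is the piece worth calling out: it flips `is_decoder` and `add_cross_attention` on the decoder config before composing the two. A short sketch confirming those flags, using the BERT configs from the docstring example above:

```python
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()

config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)

print(config.is_encoder_decoder)           # True
print(config.decoder.is_decoder)           # True, set by the classmethod
print(config.decoder.add_cross_attention)  # True, set by the classmethod
print(config.encoder.model_type, config.decoder.model_type)  # 'bert' 'bert'
```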
# coding=utf-8 # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ESM model.""" from __future__ import annotations import os from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFMaskedLMOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, shape_list, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import logging from .configuration_esm import EsmConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D" _CONFIG_FOR_DOC = "EsmConfig" TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/esm2_t6_8M_UR50D", "facebook/esm2_t12_35M_UR50D", # This is not a complete list of all ESM models! # See all ESM models at https://huggingface.co/models?filter=esm ] def rotate_half(x): x1, x2 = tf.split(x, 2, axis=-1) return tf.concat((-x2, x1), axis=-1) def apply_rotary_pos_emb(x, cos, sin): cos = cos[:, :, : tf.shape(x)[-2], :] sin = sin[:, :, : tf.shape(x)[-2], :] return (x * cos) + (rotate_half(x) * sin) def symmetrize(x): "Make layer symmetric in final two dimensions, used for contact prediction." return x + tf.linalg.matrix_transpose(x) # Transposes last two dimensions only def average_product_correct(x): "Perform average product correct, used for contact prediction." a1 = tf.reduce_sum(x, -1, keepdims=True) a2 = tf.reduce_sum(x, -2, keepdims=True) a12 = tf.reduce_sum(x, (-1, -2), keepdims=True) avg = a1 * a2 avg = avg / a12 normalized = x - avg return normalized class TFRotaryEmbedding(keras.layers.Layer): """ Rotary position embeddings based on those in [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation matrices which depend on their relative positions. """ def __init__(self, dim: int, name=None): super().__init__(name=name) # Matt: The PyTorch version of this layer does a lot of work to cache values, but we just rely on TF compilation # and/or XLA to sort out constants like that. It actually may not seem like this layer needs to be stateful at # all when we benefit from TF compilation, but it does. The reason is that self.inv_freq is a buffer in the # original implementation, but all the shared ESM checkpoints were trained with fp16 params. This means that # the inv_freq tensor was stored as a float16, and we need to replicate those lower-precision values or our # models give different outputs from the original. 
self.dim = dim def build(self, input_shape): super().build(input_shape) self.inv_freq = self.add_weight( "inv_freq", shape=(self.dim // 2,), dtype=tf.float32, initializer=get_initializer(1.0), trainable=False ) self.inv_freq.assign( 1.0 / (10000 ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim)) ) def _compute_cos_sin(self, x, seq_dimension=2): seq_len = tf.shape(x)[seq_dimension] t = tf.range(seq_len, dtype=self.inv_freq.dtype) freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer multiplication emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :] return tf.cos(emb), tf.sin(emb) def call(self, q: tf.Tensor, k: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: cos_emb, sin_emb = self._compute_cos_sin(k, seq_dimension=-2) return ( apply_rotary_pos_emb(q, cos_emb, sin_emb), apply_rotary_pos_emb(k, cos_emb, sin_emb), ) class TFEsmContactPredictionHead(keras.layers.Layer): """Performs symmetrization, apc, and computes a logistic regression on the output features""" def __init__( self, in_features: int, bias=True, eos_idx: int = 2, name=None, ): super().__init__(name=name) self.eos_idx = eos_idx self.in_features = in_features self.regression = keras.layers.Dense(1, use_bias=bias, activation="sigmoid", name="regression") def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "regression", None) is not None: with tf.name_scope(self.regression.name): self.regression.build((None, self.in_features)) def call(self, tokens, attentions): # remove eos token attentions eos_mask = tf.cast(tokens != self.eos_idx, attentions.dtype) eos_mask = tf.expand_dims(eos_mask, 1) * tf.expand_dims(eos_mask, 2) attentions = attentions * eos_mask[:, None, None, :, :] attentions = attentions[..., :-1, :-1] # remove cls token attentions attentions = attentions[..., 1:, 1:] batch_size, layers, heads, seqlen, _ = shape_list(attentions) attentions = tf.reshape(attentions, (batch_size, layers * heads, seqlen, seqlen)) # features: batch x channels x tokens x tokens (symmetric) attentions = average_product_correct(symmetrize(attentions)) attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) return tf.squeeze(self.regression(attentions), 3) class TFEsmEmbeddings(keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" def __init__(self, config, name=None): super().__init__(name=name) self.word_embeddings = keras.layers.Embedding( config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="word_embeddings", ) self.position_embeddings = keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="position_embeddings", ) if config.emb_layer_norm_before: self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") else: self.layer_norm = None # Matt: I think this line was copied incorrectly from BERT, disabling for now # self.dropout = Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.position_ids = tf.range(config.max_position_embeddings)[None, :] self.padding_idx = config.pad_token_id self.token_dropout = config.token_dropout self.mask_token_id = config.mask_token_id self.config = config def call( self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an # embedding_scale factor here. embeddings = inputs_embeds # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout # flag is False then it is handled in the same was as BERT/RoBERTa. If it is set to True, however, # masked tokens are treated as if they were selected for input dropout and zeroed out. # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample). # This is analogous to the way that dropout layers scale down outputs during evaluation when not # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training). if self.token_dropout: embeddings = tf.where((input_ids == self.mask_token_id)[:, :, None], 0.0, embeddings) mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32) masked_tokens = input_ids == self.mask_token_id mask_ratio_observed = tf.math.count_nonzero(masked_tokens, dtype=tf.float32, axis=-1) / src_lengths embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if self.layer_norm is not None: embeddings = self.layer_norm(embeddings) if attention_mask is not None: embeddings = embeddings * tf.cast(tf.expand_dims(attention_mask, -1), embeddings.dtype) # Matt: I think this line was copied incorrectly from BERT, disabling it for now. # embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: tf.Tensor Returns: tf.Tensor """ input_shape = shape_list(inputs_embeds)[:-1] sequence_length = input_shape[1] position_ids = tf.range( start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64 ) return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "word_embeddings", None) is not None: with tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, "position_embeddings", None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) class TFEsmSelfAttention(keras.layers.Layer): def __init__(self, config, position_embedding_type=None, name=None): super().__init__(name=name) if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) self.rotary_embeddings = None if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = keras.layers.Embedding( 2 * config.max_position_embeddings - 1, self.attention_head_size, embeddings_initializer=get_initializer(config.initializer_range), ) elif self.position_embedding_type == "rotary": self.rotary_embeddings = TFRotaryEmbedding(dim=self.attention_head_size, name="rotary_embeddings") self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size] x = tf.reshape(x, new_x_shape) return tf.transpose(x, perm=(0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, output_attentions: Optional[bool] = False, training: bool = False, ) -> Tuple[tf.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
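        # Four cases are handled below:
        #   1) cross-attention with a cached key/value -> reuse the cached states and the encoder attention mask
        #   2) cross-attention without a cache -> project the encoder hidden states
        #   3) self-attention with a cache -> project the current hidden states and concatenate them with the cache
        #   4) plain self-attention -> project the current hidden states only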
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim). # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent, # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original # ESM code and fix rotary embeddings. query_layer = query_layer * self.attention_head_size**-0.5 if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) if self.position_embedding_type == "rotary": query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = shape_list(hidden_states)[1] position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), -1) position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in EsmModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = attention_probs @ value_layer context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "rotary_embeddings", None) is not None: with tf.name_scope(self.rotary_embeddings.name): self.rotary_embeddings.build(None) class TFEsmSelfOutput(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states += input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmAttention(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.self = TFEsmSelfAttention(config, name="self") self.output_layer = TFEsmSelfOutput(config, name="output") self.pruned_heads = set() self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def prune_heads(self, heads): raise NotImplementedError def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=False, ): hidden_states_ln = self.LayerNorm(hidden_states) self_outputs = self.self( hidden_states_ln, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training, ) attention_output = self.output_layer(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "output_layer", None) is not None: with tf.name_scope(self.output_layer.name): self.output_layer.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): 
self.LayerNorm.build([None, None, self.config.hidden_size]) class TFEsmIntermediate(keras.layers.Layer): def __init__(self, config: EsmConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = tf.nn.gelu(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmOutput(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states += input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFEsmLayer(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TFEsmAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFEsmAttention(config) self.intermediate = TFEsmIntermediate(config, name="intermediate") self.output_layer = TFEsmOutput(config, name="output") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise AttributeError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated" " with cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is 
not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layernorm_output = self.LayerNorm(attention_output) intermediate_output = self.intermediate(hidden_states=layernorm_output) layer_output = self.output_layer( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "output_layer", None) is not None: with tf.name_scope(self.output_layer.name): self.output_layer.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFEsmEncoder(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.config = config self.layer = [TFEsmLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] self.emb_layer_norm_after = keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="emb_layer_norm_after" ) def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=False, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if self.emb_layer_norm_after: hidden_states = self.emb_layer_norm_after(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return 
TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "emb_layer_norm_after", None) is not None: with tf.name_scope(self.emb_layer_norm_after.name): self.emb_layer_norm_after.build([None, None, self.config.hidden_size]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Esm class TFEsmPooler(keras.layers.Layer): def __init__(self, config: EsmConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = EsmConfig base_model_prefix = "esm" ESM_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular Keras model and refer to the TF/Keras documentation for all matters related to general usage and behavior. Parameters: config ([`EsmConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ ESM_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.", ESM_START_DOCSTRING, ) class TFEsmMainLayer(keras.layers.Layer): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. 
""" _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True, name=None, **kwargs): super().__init__(name=name, **kwargs) self.config = config self.is_decoder = config.is_decoder self.embeddings = TFEsmEmbeddings(config, name="embeddings") self.encoder = TFEsmEncoder(config, name="encoder") self.pooler = TFEsmPooler(config, name="pooler") if add_pooling_layer else None self.contact_head = TFEsmContactPredictionHead( in_features=self.config.num_hidden_layers * self.config.num_attention_heads, bias=True, name="contact_head" ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, "contact_head", None) is not None: with tf.name_scope(self.contact_head.name): self.contact_head.build(None) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.word_embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) embedding_output = self.embeddings( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length # Copied from `modeling_tf_t5.py` # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal( tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None], ) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape( extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) ) if past_key_values[0] is not None: # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length] extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape( attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.is_decoder and encoder_attention_mask is not None: # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def predict_contacts(self, tokens, attention_mask): attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions attns = tf.stack(attns, axis=1) # Matches the original model layout # In the original model, attentions for padding tokens are completely zeroed out. # This makes no difference most of the time because the other tokens won't attend to them, # but it does for the contact prediction task, which takes attentions as input, # so we have to mimic that here. 
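        # The two broadcasted multiplications below zero the key (last) axis and then the query (second-to-last)
        # axis of the stacked attention tensor, whose layout is (batch, layers, heads, seq_len, seq_len), so that
        # padding positions contribute nothing to the contact head.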
attention_mask = tf.cast(attention_mask, attns.dtype) attns *= attention_mask[:, None, None, None] attns *= attention_mask[:, None, None, :, None] return self.contact_head(tokens, attns) @add_start_docstrings( "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.", ESM_START_DOCSTRING, ) class TFEsmModel(TFEsmPreTrainedModel): def __init__(self, config: EsmConfig, add_pooling_layer=True, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.esm = TFEsmMainLayer(config, add_pooling_layer=add_pooling_layer, name="esm") @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
Set to `False` during training, `True` during generation """ outputs = self.esm( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) @add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING) class TFEsmForMaskedLM(TFEsmPreTrainedModel, TFMaskedLanguageModelingLoss): _keys_to_ignore_on_load_missing = [r"position_ids"] _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm") self.lm_head = TFEsmLMHead(config, name="lm_head") if config.tie_word_embeddings: # Ensure word embeddings are built so that we actually have something to tie with tf.name_scope(os.path.join(self._name_scope(), "esm", "embeddings", "word_embeddings")): self.esm.embeddings.word_embeddings.build((None, None)) self.lm_head.decoder = self.esm.embeddings.word_embeddings.weights[0] def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings def get_lm_head(self): return self.lm_head @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, optional, defaults to *{}*): Used to hide legacy arguments that have been deprecated. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: masked_lm_loss = self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFMaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) class TFEsmLMHead(keras.layers.Layer): """ESM Head for masked language modeling.""" def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") if config.tie_word_embeddings: self.decoder = None else: self.decoder = keras.layers.Dense( config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="decoder", use_bias=False, ) self.config = config def build(self, input_shape=None): # Separate bias to match the PT model and allow weight cross-loading to work # Put it in the build so it gets the right name when adding it as a weight if self.built: return self.built = True self.bias = self.add_weight("bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "decoder", None) is not None and not self.config.tie_word_embeddings: with tf.name_scope(self.decoder.name): self.decoder.build([None, None, self.config.hidden_size]) def get_bias(self): return {"bias": self.bias} def call(self, features): x = self.dense(features) x = tf.nn.gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias if self.config.tie_word_embeddings: x = tf.matmul(x, self.decoder, transpose_b=True) + self.bias else: x = self.decoder(x) + self.bias return x @add_start_docstrings( """ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", ESM_START_DOCSTRING, ) class TFEsmForSequenceClassification(TFEsmPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm") self.classifier = TFEsmClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", ESM_START_DOCSTRING, ) class TFEsmForTokenClassification(TFEsmPreTrainedModel, TFTokenClassificationLoss): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense(config.num_labels, name="classifier") self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) class TFEsmClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), activation="linear", name="out_proj", ) self.config = config def call(self, features, training=False): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])
        x = self.dropout(x, training=training)
        x = self.dense(x)
        x = self.dropout(x, training=training)
        x = self.out_proj(x)
        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.config.hidden_size])


def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: tf.Tensor
        padding_idx: int
        past_key_values_length: int

    Returns: tf.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = tf.cast(input_ids != padding_idx, tf.int64)
    incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask
    return incremental_indices + padding_idx
transformers/src/transformers/models/esm/modeling_tf_esm.py/0
{ "file_path": "transformers/src/transformers/models/esm/modeling_tf_esm.py", "repo_id": "transformers", "token_count": 30031 }
318
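A minimal usage sketch for the TF ESM model defined in the file above. The checkpoint name is the one documented in `_CHECKPOINT_FOR_DOC`; the protein sequence is a made-up example, and the exact output shapes depend on the tokenizer's special tokens.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFEsmModel

# Load the documented checkpoint; any ESM-2 checkpoint from the archive list above should behave the same way.
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

# A toy protein sequence (single-letter amino acid codes), chosen only for illustration.
inputs = tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="tf")

outputs = model(**inputs)
hidden_states = outputs.last_hidden_state  # (batch, seq_len, hidden_size)

# Contact prediction reuses the self-attention maps, as implemented in predict_contacts() above.
contacts = model.predict_contacts(inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(hidden_states.shape, contacts.shape)
```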
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FastSpeech2Conformer model configuration""" from typing import Dict from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { "espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/config.json", } FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = { "espnet/fastspeech2_conformer_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_hifigan/raw/main/config.json", } FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = { "espnet/fastspeech2_conformer_with_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_with_hifigan/raw/main/config.json", } class FastSpeech2ConformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 384): The dimensionality of the hidden layers. vocab_size (`int`, *optional*, defaults to 78): The size of the vocabulary. num_mel_bins (`int`, *optional*, defaults to 80): The number of mel filters used in the filter bank. encoder_num_attention_heads (`int`, *optional*, defaults to 2): The number of attention heads in the encoder. encoder_layers (`int`, *optional*, defaults to 4): The number of layers in the encoder. encoder_linear_units (`int`, *optional*, defaults to 1536): The number of units in the linear layer of the encoder. decoder_layers (`int`, *optional*, defaults to 4): The number of layers in the decoder. decoder_num_attention_heads (`int`, *optional*, defaults to 2): The number of attention heads in the decoder. decoder_linear_units (`int`, *optional*, defaults to 1536): The number of units in the linear layer of the decoder. speech_decoder_postnet_layers (`int`, *optional*, defaults to 5): The number of layers in the post-net of the speech decoder. speech_decoder_postnet_units (`int`, *optional*, defaults to 256): The number of units in the post-net layers of the speech decoder. speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5): The kernel size in the post-net of the speech decoder. positionwise_conv_kernel_size (`int`, *optional*, defaults to 3): The size of the convolution kernel used in the position-wise layer. 
encoder_normalize_before (`bool`, *optional*, defaults to `False`): Specifies whether to normalize before encoder layers. decoder_normalize_before (`bool`, *optional*, defaults to `False`): Specifies whether to normalize before decoder layers. encoder_concat_after (`bool`, *optional*, defaults to `False`): Specifies whether to concatenate after encoder layers. decoder_concat_after (`bool`, *optional*, defaults to `False`): Specifies whether to concatenate after decoder layers. reduction_factor (`int`, *optional*, defaults to 1): The factor by which the speech frame rate is reduced. speaking_speed (`float`, *optional*, defaults to 1.0): The speed of the speech produced. use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`): Specifies whether to use macaron style in the conformer. use_cnn_in_conformer (`bool`, *optional*, defaults to `True`): Specifies whether to use convolutional neural networks in the conformer. encoder_kernel_size (`int`, *optional*, defaults to 7): The kernel size used in the encoder. decoder_kernel_size (`int`, *optional*, defaults to 31): The kernel size used in the decoder. duration_predictor_layers (`int`, *optional*, defaults to 2): The number of layers in the duration predictor. duration_predictor_channels (`int`, *optional*, defaults to 256): The number of channels in the duration predictor. duration_predictor_kernel_size (`int`, *optional*, defaults to 3): The kernel size used in the duration predictor. energy_predictor_layers (`int`, *optional*, defaults to 2): The number of layers in the energy predictor. energy_predictor_channels (`int`, *optional*, defaults to 256): The number of channels in the energy predictor. energy_predictor_kernel_size (`int`, *optional*, defaults to 3): The kernel size used in the energy predictor. energy_predictor_dropout (`float`, *optional*, defaults to 0.5): The dropout rate in the energy predictor. energy_embed_kernel_size (`int`, *optional*, defaults to 1): The kernel size used in the energy embed layer. energy_embed_dropout (`float`, *optional*, defaults to 0.0): The dropout rate in the energy embed layer. stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`): Specifies whether to stop gradients from the energy predictor. pitch_predictor_layers (`int`, *optional*, defaults to 5): The number of layers in the pitch predictor. pitch_predictor_channels (`int`, *optional*, defaults to 256): The number of channels in the pitch predictor. pitch_predictor_kernel_size (`int`, *optional*, defaults to 5): The kernel size used in the pitch predictor. pitch_predictor_dropout (`float`, *optional*, defaults to 0.5): The dropout rate in the pitch predictor. pitch_embed_kernel_size (`int`, *optional*, defaults to 1): The kernel size used in the pitch embed layer. pitch_embed_dropout (`float`, *optional*, defaults to 0.0): The dropout rate in the pitch embed layer. stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`): Specifies whether to stop gradients from the pitch predictor. encoder_dropout_rate (`float`, *optional*, defaults to 0.2): The dropout rate in the encoder. encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2): The positional dropout rate in the encoder. encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2): The attention dropout rate in the encoder. decoder_dropout_rate (`float`, *optional*, defaults to 0.2): The dropout rate in the decoder. 
decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2): The positional dropout rate in the decoder. decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2): The attention dropout rate in the decoder. duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2): The dropout rate in the duration predictor. speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5): The dropout rate in the speech decoder postnet. max_source_positions (`int`, *optional*, defaults to 5000): if `"relative"` position embeddings are used, defines the maximum source input positions. use_masking (`bool`, *optional*, defaults to `True`): Specifies whether to use masking in the model. use_weighted_masking (`bool`, *optional*, defaults to `False`): Specifies whether to use weighted masking in the model. num_speakers (`int`, *optional*): Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use speaker id embedding layer. num_languages (`int`, *optional*): Number of languages. If set to > 1, assume that the language ids will be provided as the input and use the languge id embedding layer. speaker_embed_dim (`int`, *optional*): Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Specifies whether the model is an encoder-decoder. Example: ```python >>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig >>> # Initializing a FastSpeech2Conformer style configuration >>> configuration = FastSpeech2ConformerConfig() >>> # Initializing a model from the FastSpeech2Conformer style configuration >>> model = FastSpeech2ConformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "fastspeech2_conformer" attribute_map = {"num_hidden_layers": "encoder_layers", "num_attention_heads": "encoder_num_attention_heads"} def __init__( self, hidden_size=384, vocab_size=78, num_mel_bins=80, encoder_num_attention_heads=2, encoder_layers=4, encoder_linear_units=1536, decoder_layers=4, decoder_num_attention_heads=2, decoder_linear_units=1536, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, positionwise_conv_kernel_size=3, encoder_normalize_before=False, decoder_normalize_before=False, encoder_concat_after=False, decoder_concat_after=False, reduction_factor=1, speaking_speed=1.0, use_macaron_style_in_conformer=True, use_cnn_in_conformer=True, encoder_kernel_size=7, decoder_kernel_size=31, duration_predictor_layers=2, duration_predictor_channels=256, duration_predictor_kernel_size=3, energy_predictor_layers=2, energy_predictor_channels=256, energy_predictor_kernel_size=3, energy_predictor_dropout=0.5, energy_embed_kernel_size=1, energy_embed_dropout=0.0, stop_gradient_from_energy_predictor=False, pitch_predictor_layers=5, pitch_predictor_channels=256, pitch_predictor_kernel_size=5, pitch_predictor_dropout=0.5, pitch_embed_kernel_size=1, pitch_embed_dropout=0.0, stop_gradient_from_pitch_predictor=True, encoder_dropout_rate=0.2, encoder_positional_dropout_rate=0.2, encoder_attention_dropout_rate=0.2, decoder_dropout_rate=0.2, decoder_positional_dropout_rate=0.2, decoder_attention_dropout_rate=0.2, duration_predictor_dropout_rate=0.2, speech_decoder_postnet_dropout=0.5, max_source_positions=5000, use_masking=True, use_weighted_masking=False, num_speakers=None, num_languages=None, speaker_embed_dim=None, 
is_encoder_decoder=True, **kwargs, ): if positionwise_conv_kernel_size % 2 == 0: raise ValueError( f"positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead." ) if encoder_kernel_size % 2 == 0: raise ValueError(f"encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.") if decoder_kernel_size % 2 == 0: raise ValueError(f"decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.") if duration_predictor_kernel_size % 2 == 0: raise ValueError( f"duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead." ) if energy_predictor_kernel_size % 2 == 0: raise ValueError( f"energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead." ) if energy_embed_kernel_size % 2 == 0: raise ValueError(f"energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.") if pitch_predictor_kernel_size % 2 == 0: raise ValueError( f"pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead." ) if pitch_embed_kernel_size % 2 == 0: raise ValueError(f"pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.") if hidden_size % encoder_num_attention_heads != 0: raise ValueError("The hidden_size must be evenly divisible by encoder_num_attention_heads.") if hidden_size % decoder_num_attention_heads != 0: raise ValueError("The hidden_size must be evenly divisible by decoder_num_attention_heads.") if use_masking and use_weighted_masking: raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.") self.hidden_size = hidden_size self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.encoder_config = { "num_attention_heads": encoder_num_attention_heads, "layers": encoder_layers, "kernel_size": encoder_kernel_size, "attention_dropout_rate": encoder_attention_dropout_rate, "dropout_rate": encoder_dropout_rate, "positional_dropout_rate": encoder_positional_dropout_rate, "linear_units": encoder_linear_units, "normalize_before": encoder_normalize_before, "concat_after": encoder_concat_after, } self.decoder_config = { "num_attention_heads": decoder_num_attention_heads, "layers": decoder_layers, "kernel_size": decoder_kernel_size, "attention_dropout_rate": decoder_attention_dropout_rate, "dropout_rate": decoder_dropout_rate, "positional_dropout_rate": decoder_positional_dropout_rate, "linear_units": decoder_linear_units, "normalize_before": decoder_normalize_before, "concat_after": decoder_concat_after, } self.encoder_num_attention_heads = encoder_num_attention_heads self.encoder_layers = encoder_layers self.duration_predictor_channels = duration_predictor_channels self.duration_predictor_kernel_size = duration_predictor_kernel_size self.duration_predictor_layers = duration_predictor_layers self.energy_embed_dropout = energy_embed_dropout self.energy_embed_kernel_size = energy_embed_kernel_size self.energy_predictor_channels = energy_predictor_channels self.energy_predictor_dropout = energy_predictor_dropout self.energy_predictor_kernel_size = energy_predictor_kernel_size self.energy_predictor_layers = energy_predictor_layers self.pitch_embed_dropout = pitch_embed_dropout self.pitch_embed_kernel_size = pitch_embed_kernel_size self.pitch_predictor_channels = pitch_predictor_channels self.pitch_predictor_dropout = pitch_predictor_dropout self.pitch_predictor_kernel_size = pitch_predictor_kernel_size self.pitch_predictor_layers = pitch_predictor_layers self.positionwise_conv_kernel_size = 
positionwise_conv_kernel_size self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.reduction_factor = reduction_factor self.speaking_speed = speaking_speed self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor self.max_source_positions = max_source_positions self.use_cnn_in_conformer = use_cnn_in_conformer self.use_macaron_style_in_conformer = use_macaron_style_in_conformer self.use_masking = use_masking self.use_weighted_masking = use_weighted_masking self.num_speakers = num_speakers self.num_languages = num_languages self.speaker_embed_dim = speaker_embed_dim self.duration_predictor_dropout_rate = duration_predictor_dropout_rate self.is_encoder_decoder = is_encoder_decoder super().__init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) class FastSpeech2ConformerHifiGanConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGanModel`]. It is used to instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FastSpeech2Conformer [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_dim (`int`, *optional*, defaults to 80): The number of frequency bins in the input log-mel spectrogram. upsample_initial_channel (`int`, *optional*, defaults to 512): The number of input channels into the upsampling network. upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`): A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The length of *upsample_rates* defines the number of convolutional layers and has to match the length of *upsample_kernel_sizes*. upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`): A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of *upsample_rates*. resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field fusion (MRF) module. resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the multi-receptive field fusion (MRF) module. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.1): The angle of the negative slope used by the leaky ReLU activation. 
normalize_before (`bool`, *optional*, defaults to `True`): Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance. Example: ```python >>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig >>> # Initializing a FastSpeech2ConformerHifiGan configuration >>> configuration = FastSpeech2ConformerHifiGanConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = FastSpeech2ConformerHifiGan(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "hifigan" def __init__( self, model_in_dim=80, upsample_initial_channel=512, upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], initializer_range=0.01, leaky_relu_slope=0.1, normalize_before=True, **kwargs, ): self.model_in_dim = model_in_dim self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope self.normalize_before = normalize_before super().__init__(**kwargs) class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to instantiate a `FastSpeech2ConformerWithHifiGanModel` model according to the specified sub-models configurations, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and FastSpeech2ConformerHifiGan [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_config (`typing.Dict`, *optional*): Configuration of the text-to-speech model ([`FastSpeech2ConformerConfig`]). vocoder_config (`typing.Dict`, *optional*): Configuration of the vocoder model ([`FastSpeech2ConformerHifiGanConfig`]). Example: ```python >>> from transformers import ( ... FastSpeech2ConformerConfig, ... FastSpeech2ConformerHifiGanConfig, ... FastSpeech2ConformerWithHifiGanConfig, ... FastSpeech2ConformerWithHifiGan, ... ) >>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
>>> model_config = FastSpeech2ConformerConfig() >>> vocoder_config = FastSpeech2ConformerHifiGanConfig() >>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration >>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict()) >>> # Initializing a model (with random weights) >>> model = FastSpeech2ConformerWithHifiGan(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "fastspeech2_conformer_with_hifigan" is_composition = True def __init__( self, model_config: Dict = None, vocoder_config: Dict = None, **kwargs, ): if model_config is None: model_config = {} logger.info("model_config is None. Initializing the model with default values.") if vocoder_config is None: vocoder_config = {} logger.info("vocoder_config is None. Initializing the vocoder with default values.") self.model_config = FastSpeech2ConformerConfig(**model_config) self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config) super().__init__(**kwargs)
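As a quick illustration of the configuration classes defined in this file, here is a minimal usage sketch. It relies only on behavior shown above (the odd-kernel-size validation in `FastSpeech2ConformerConfig.__init__` and the composition pattern from the `FastSpeech2ConformerWithHifiGanConfig` docstring); it is an editorial example, not part of the original module.

```python
from transformers import (
    FastSpeech2ConformerConfig,
    FastSpeech2ConformerHifiGanConfig,
    FastSpeech2ConformerWithHifiGanConfig,
)

# Kernel sizes must be odd; an even value is rejected at construction time.
try:
    FastSpeech2ConformerConfig(encoder_kernel_size=8)
except ValueError as err:
    print(err)  # "encoder_kernel_size must be odd, but got 8 instead."

# Compose the text-to-speech config and the vocoder config into the combined config.
model_config = FastSpeech2ConformerConfig()           # defaults: hidden_size=384, vocab_size=78
vocoder_config = FastSpeech2ConformerHifiGanConfig()  # defaults: model_in_dim=80
combined = FastSpeech2ConformerWithHifiGanConfig(
    model_config=model_config.to_dict(),
    vocoder_config=vocoder_config.to_dict(),
)
print(combined.model_config.hidden_size)     # 384
print(combined.vocoder_config.model_in_dim)  # 80
```

The combined config stores the two sub-configs as fully-fledged config objects, so downstream code can read either one without re-parsing the dictionaries.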
transformers/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py/0
{ "file_path": "transformers/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py", "repo_id": "transformers", "token_count": 9839 }
319
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Flava.""" import math import random from functools import lru_cache from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) # These values are taken from CLIP FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN FLAVA_IMAGE_STD = OPENAI_CLIP_STD FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0] FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0] LOGIT_LAPLACE_EPS: float = 0.1 # Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py class FlavaMaskingGenerator: def __init__( self, input_size: Union[int, Tuple[int, int]] = 14, total_mask_patches: int = 75, mask_group_max_patches: Optional[int] = None, mask_group_min_patches: int = 16, mask_group_min_aspect_ratio: Optional[float] = 0.3, mask_group_max_aspect_ratio: float = None, ): if not isinstance(input_size, tuple): input_size = (input_size,) * 2 self.height, self.width = input_size self.num_patches = self.height * self.width self.total_mask_patches = total_mask_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio)) def __repr__(self): repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % ( self.height, self.width, self.mask_group_min_patches, self.mask_group_max_patches, self.total_mask_patches, self.log_aspect_ratio[0], self.log_aspect_ratio[1], ) return repr_str def get_shape(self): return self.height, self.width def _mask(self, mask, max_mask_patches): delta = 0 for _attempt in range(10): target_area = random.uniform(self.mask_group_min_patches, max_mask_patches) aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) height = int(round(math.sqrt(target_area * aspect_ratio))) width = int(round(math.sqrt(target_area / aspect_ratio))) if width < self.width and height < self.height: top = random.randint(0, self.height - height) left = random.randint(0, self.width - width) num_masked = mask[top : top + height, left : left + width].sum() # Overlap if 0 < height * width - num_masked <= max_mask_patches: for i in range(top, top + height): for j in range(left, left + width): if mask[i, j] == 0: mask[i, j] 
= 1 delta += 1 if delta > 0: break return delta def __call__(self): mask = np.zeros(shape=self.get_shape(), dtype=int) mask_count = 0 while mask_count < self.total_mask_patches: max_mask_patches = self.total_mask_patches - mask_count max_mask_patches = min(max_mask_patches, self.mask_group_max_patches) delta = self._mask(mask, max_mask_patches) if delta == 0: break else: mask_count += delta return mask class FlavaImageProcessor(BaseImageProcessor): r""" Constructs a Flava image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in `preprocess`. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`. crop_size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the `crop_size` parameter in `preprocess`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in `preprocess`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in `preprocess`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. return_image_mask (`bool`, *optional*, defaults to `False`): Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`. input_size_patches (`int`, *optional*, defaults to 14): Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden by the `input_size_patches` parameter in `preprocess`. total_mask_patches (`int`, *optional*, defaults to 75): Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in `preprocess`. mask_group_min_patches (`int`, *optional*, defaults to 16): Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches` parameter in `preprocess`. mask_group_max_patches (`int`, *optional*): Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches` parameter in `preprocess`. 
mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3): Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter in `preprocess`. mask_group_max_aspect_ratio (`float`, *optional*): Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter in `preprocess`. codebook_do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the `codebook_do_resize` parameter in `preprocess`. codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`): Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in `preprocess`. codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample` parameter in `preprocess`. codebook_do_center_crop (`bool`, *optional*, defaults to `True`): Whether to crop the input for codebook at the center. If the input size is smaller than `codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by the `codebook_do_center_crop` parameter in `preprocess`. codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`): Desired output size for codebook input when applying center-cropping. Can be overridden by the `codebook_crop_size` parameter in `preprocess`. codebook_do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be overridden by the `codebook_do_rescale` parameter in `preprocess`. codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Defines the scale factor to use if rescaling the codebook image. Can be overridden by the `codebook_rescale_factor` parameter in `preprocess`. codebook_do_map_pixels (`bool`, *optional*, defaults to `True`): Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the `codebook_do_map_pixels` parameter in `preprocess`. codebook_do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can be overridden by the `codebook_do_normalize` parameter in `preprocess`. codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`): The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden by the `codebook_image_mean` parameter in `preprocess`. codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[1, 1, 1]`): The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can be overridden by the `codebook_image_std` parameter in `preprocess`. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, # Mask related params return_image_mask: bool = False, input_size_patches: int = 14, total_mask_patches: int = 75, mask_group_min_patches: int = 16, mask_group_max_patches: Optional[int] = None, mask_group_min_aspect_ratio: float = 0.3, mask_group_max_aspect_ratio: Optional[float] = None, # Codebook related params return_codebook_pixels: bool = False, codebook_do_resize: bool = True, codebook_size: bool = None, codebook_resample: int = PILImageResampling.LANCZOS, codebook_do_center_crop: bool = True, codebook_crop_size: int = None, codebook_do_rescale: bool = True, codebook_rescale_factor: Union[int, float] = 1 / 255, codebook_do_map_pixels: bool = True, codebook_do_normalize: bool = True, codebook_image_mean: Optional[Union[float, Iterable[float]]] = None, codebook_image_std: Optional[Union[float, Iterable[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112} codebook_size = get_size_dict(codebook_size, param_name="codebook_size") codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112} codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD self.return_image_mask = return_image_mask self.input_size_patches = input_size_patches self.total_mask_patches = total_mask_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_max_patches = mask_group_max_patches self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio self.return_codebook_pixels = return_codebook_pixels self.codebook_do_resize = codebook_do_resize self.codebook_size = codebook_size self.codebook_resample = codebook_resample self.codebook_do_center_crop = codebook_do_center_crop self.codebook_crop_size = codebook_crop_size self.codebook_do_rescale = codebook_do_rescale self.codebook_rescale_factor = codebook_rescale_factor self.codebook_do_map_pixels = codebook_do_map_pixels self.codebook_do_normalize = codebook_do_normalize self.codebook_image_mean = codebook_image_mean self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD self._valid_processor_keys = [ "images", "do_resize", "size", "resample", "do_center_crop", "crop_size", "do_rescale", 
"rescale_factor", "do_normalize", "image_mean", "image_std", "return_image_mask", "input_size_patches", "total_mask_patches", "mask_group_min_patches", "mask_group_max_patches", "mask_group_min_aspect_ratio", "mask_group_max_aspect_ratio", "return_codebook_pixels", "codebook_do_resize", "codebook_size", "codebook_resample", "codebook_do_center_crop", "codebook_crop_size", "codebook_do_rescale", "codebook_rescale_factor", "codebook_do_map_pixels", "codebook_do_normalize", "codebook_image_mean", "codebook_image_std", "return_tensors", "data_format", "input_data_format", ] @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)` """ image_processor_dict = image_processor_dict.copy() if "codebook_size" in kwargs: image_processor_dict["codebook_size"] = kwargs.pop("codebook_size") if "codebook_crop_size" in kwargs: image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size") return super().from_dict(image_processor_dict, **kwargs) @lru_cache() def masking_generator( self, input_size_patches, total_mask_patches, mask_group_min_patches, mask_group_max_patches, mask_group_min_aspect_ratio, mask_group_max_aspect_ratio, ) -> FlavaMaskingGenerator: return FlavaMaskingGenerator( input_size=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio, ) # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. 
""" size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def map_pixels(self, image: np.ndarray) -> np.ndarray: return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_map_pixels: bool = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """Preprocesses a single image.""" validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) if do_map_pixels: image = self.map_pixels(image) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, # Mask related params return_image_mask: Optional[bool] = None, input_size_patches: Optional[int] = None, total_mask_patches: Optional[int] = None, mask_group_min_patches: Optional[int] = None, mask_group_max_patches: Optional[int] = None, mask_group_min_aspect_ratio: Optional[float] = None, mask_group_max_aspect_ratio: Optional[float] = None, # Codebook related params return_codebook_pixels: Optional[bool] = None, codebook_do_resize: Optional[bool] = None, codebook_size: Optional[Dict[str, int]] = None, codebook_resample: Optional[int] = None, codebook_do_center_crop: Optional[bool] = None, codebook_crop_size: Optional[Dict[str, int]] = None, 
codebook_do_rescale: Optional[bool] = None, codebook_rescale_factor: Optional[float] = None, codebook_do_map_pixels: Optional[bool] = None, codebook_do_normalize: Optional[bool] = None, codebook_image_mean: Optional[Iterable[float]] = None, codebook_image_std: Optional[Iterable[float]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`): Whether to return the image mask. input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`): Size of the patches to extract from the image. total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`): Total number of patches to extract from the image. mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`): Minimum number of patches to extract from the image. mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`): Maximum number of patches to extract from the image. mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`): Minimum aspect ratio of the patches to extract from the image. mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`): Maximum aspect ratio of the patches to extract from the image. return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`): Whether to return the codebook pixels. codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`): Whether to resize the codebook pixels. codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`): Size of the codebook pixels. codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`): Resampling filter to use if resizing the codebook pixels. This can be one of the enum `PILImageResampling`, Only has an effect if `codebook_do_resize` is set to `True`. 
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`): Whether to center crop the codebook pixels. codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`): Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set to `True`. codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`): Whether to rescale the codebook pixels values between [0 - 1]. codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`): Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`. codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`): Whether to map the codebook pixels values. codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`): Whether to normalize the codebook pixels. codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`): Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`. codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`): Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches mask_group_min_patches = ( mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches ) mask_group_max_patches = ( mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches ) mask_group_min_aspect_ratio = ( mask_group_min_aspect_ratio if mask_group_min_aspect_ratio is not None else self.mask_group_min_aspect_ratio ) mask_group_max_aspect_ratio = ( mask_group_max_aspect_ratio if mask_group_max_aspect_ratio is not None else self.mask_group_max_aspect_ratio ) return_codebook_pixels = ( return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels ) codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize codebook_size = codebook_size if codebook_size is not None else self.codebook_size codebook_size = get_size_dict(codebook_size, param_name="codebook_size") codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale codebook_rescale_factor = ( codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor ) codebook_do_center_crop = ( codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop ) codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size") codebook_do_map_pixels = ( codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels ) codebook_do_normalize = ( codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize ) codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std images = make_list_of_images(images) validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) processed_images = [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_map_pixels=False, data_format=data_format, input_data_format=input_data_format, ) for img in images ] data = {"pixel_values": processed_images} if return_codebook_pixels: codebook_images = [ self._preprocess_image( image=img, do_resize=codebook_do_resize, size=codebook_size, resample=codebook_resample, do_center_crop=codebook_do_center_crop, crop_size=codebook_crop_size, do_rescale=codebook_do_rescale, rescale_factor=codebook_rescale_factor, do_normalize=codebook_do_normalize, image_mean=codebook_image_mean, image_std=codebook_image_std, do_map_pixels=codebook_do_map_pixels, data_format=data_format, input_data_format=input_data_format, ) for img in images ] data["codebook_pixel_values"] = codebook_images if return_image_mask: mask_generator = self.masking_generator( input_size_patches=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio, ) masks = [mask_generator() for _ in images] data["bool_masked_pos"] = masks return BatchFeature(data=data, tensor_type=return_tensors)
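To make the preprocessing flow above concrete, here is a minimal, hypothetical sketch of calling `FlavaImageProcessor.preprocess` with both the codebook pixels and the patch mask enabled. The shapes in the comments follow the defaults defined in this file (224x224 main crop, 112x112 codebook crop, 14x14 patch grid, up to 75 masked patches); treat it as an illustration rather than part of the module.

```python
import numpy as np
from transformers import FlavaImageProcessor

# Dummy RGB image in (height, width, channels) layout with pixel values in [0, 255].
image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

processor = FlavaImageProcessor()
outputs = processor.preprocess(
    image,
    return_codebook_pixels=True,  # adds "codebook_pixel_values", passed through map_pixels
    return_image_mask=True,       # adds "bool_masked_pos" built by FlavaMaskingGenerator
    return_tensors="np",
)

print(outputs["pixel_values"].shape)           # (1, 3, 224, 224) with the default size/crop
print(outputs["codebook_pixel_values"].shape)  # (1, 3, 112, 112) with the default codebook crop
print(outputs["bool_masked_pos"].shape)        # (1, 14, 14): 14x14 patch grid, up to 75 patches masked
```

Calling the processor via `processor(image, ...)` goes through the same `preprocess` path; the explicit call is used here only because this file defines `preprocess` directly.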
transformers/src/transformers/models/flava/image_processing_flava.py/0
{ "file_path": "transformers/src/transformers/models/flava/image_processing_flava.py", "repo_id": "transformers", "token_count": 16962 }
320
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19 # Authors: # - @alexeib Alexei Baevski # - @edunov Sergey Edunov # - @michaelauli Michael Auli # - @myleott Myle Ott # - @nng555 Nathan Ng # - David Grangier # - Kyra Yee # # Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616 # """PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19""" import math from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import Tensor, nn from torch.nn import CrossEntropyLoss, LayerNorm from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_fsmt import FSMTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/wmt19-ru-en" _CONFIG_FOR_DOC = "FSMTConfig" # See all FSMT models at https://huggingface.co/models?filter=fsmt # Porting notes: # this one is modeled after BartModel* # # Currently only translation (fairseq also has weights for LM) # # fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported. # - ru-en, en-ru use asymmetric vocab # - de-en, en-de use a merged single vocab (but the code works as if they are separate) # # Differences with Bart: # - not using bos token # - 2 separate vocabs (src and target) # - embed weights aren't tied # - uses a model Ensemble (but that part isn't ported/implemented yet) - so we # aren't getting as good of a BLEU score # - uses a projection layer at the end of the decoder # - doesn't use final_logits_bias # - beam search: stops as soon as num_beams == len(hypos) (whereas transformers # is not satisfied there and will continue searching until the next cycles # aren't promising something better), comparing BLEU scores - the transformers # algorithm is slightly superior, therefore using the latter. But if you want # to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``. # # SinusoidalPositionalEmbedding is slightly different from Bart's - generates # different embeddings. This implementation is copied verbatim from fairseq with # some small changes to make it work here. 
# # Other changes: # - doesn't support use_cache as Bart's version does # # # FSMTConfig changes with BartConfig # # Differences with BART: # - src/tgt vocabs aren't shared # - token embeddings aren't shared # - needs a language pair # - scale_embedding are True # # some unused args were removed too # # # TODO: # - port model ensemble (fs uses 4 model checkpoints) # - solve beam search discrepancies # docstyle-ignore """ Here is how to compare BLEU scores against fairseq implementation: # en-ru export PAIR=en-ru export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605) # ru-en export PAIR=ru-en export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937) # de-en export PAIR=de-en export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750) # en-de export PAIR=en-de export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862) """ FSMT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FSMTConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ FSMT_GENERATION_EXAMPLE = r""" Translation example:: ```python >>> from transformers import AutoTokenizer, FSMTForConditionalGeneration >>> mname = "facebook/wmt19-ru-en" >>> model = FSMTForConditionalGeneration.from_pretrained(mname) >>> tokenizer = AutoTokenizer.from_pretrained(mname) >>> src_text = "Машинное обучение - это здорово, не так ли?" >>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids >>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) "Machine learning is great, isn't it?" ``` """ FSMT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def invert_mask(attention_mask): """Turns 1->0, 0->1, False->True, True-> False""" assert attention_mask.dim() == 2 return attention_mask.eq(0) def triu_onnx(x, diagonal=0): l = x.shape[0] arange = torch.arange(l, device=x.device) mask = arange.expand(l, l) arange = arange.unsqueeze(-1) if diagonal: arange = arange + diagonal mask = mask >= arange return x.masked_fill(mask == 0, 0) def _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32, ): """ Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided. This mimics the default behavior in fairseq. To override it pass in masks. 
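    The causal mask is built with `triu_onnx` and `fill_with_neg_inf`: entries strictly above the diagonal hold the
    most negative representable value and all other entries are zero, so each target position can attend only to
    itself and to earlier positions.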
Note: this is not called during generation """ pad_token_id = config.pad_token_id if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, pad_token_id) bsz, tgt_len = decoder_input_ids.size() if decoder_padding_mask is None: decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id) else: decoder_padding_mask = invert_mask(decoder_padding_mask) causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype)), 1).to( device=decoder_input_ids.device ) return decoder_input_ids, decoder_padding_mask, causal_mask class PretrainedFSMTModel(PreTrainedModel): config_class = FSMTConfig base_model_prefix = "model" def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, SinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs def _make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer # Helper Functions, mostly for making masks def _check_shapes(shape_1, shape2): if shape_1 != shape2: raise AssertionError(f"shape mismatch: {shape_1} != {shape2}") def shift_tokens_right(input_ids, pad_token_id): """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>).""" # replace possible -100 values in labels by `pad_token_id` input_ids.masked_fill_(input_ids == -100, pad_token_id) prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_output_tokens def make_padding_mask(input_ids, padding_idx=1): """True for pad tokens""" padding_mask = input_ids.eq(padding_idx) if not padding_mask.any(): padding_mask = None return padding_mask # Helper Modules class EncoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False): """ Args: x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)* encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape *(batch, src_len)* where padding elements are indicated by `1`. for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. 
Returns: encoded output of shape *(seq_len, batch, embed_dim)* """ residual = x x, attn_weights = self.self_attn( query=x, key=x, key_padding_mask=encoder_padding_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return x, attn_weights class FSMTEncoder(nn.Module): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`EncoderLayer`]. Args: config: FSMTConfig """ def __init__(self, config: FSMTConfig, embed_tokens): super().__init__() self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) # type: List[EncoderLayer] def forward( self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: torch.Tensor = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): """ Args: input_ids (`torch.LongTensor`): tokens in the source language of shape *(batch, src_len)* attention_mask (`torch.LongTensor`): indicating which indices are padding tokens inputs_embeds (`torch.FloatTensor`): embedding vectors of shape *(batch, src_len, embed_dim)* head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Returns: BaseModelOutput or Tuple comprised of: - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)* - **encoder_states** (`Tuple(torch.FloatTensor`)): all intermediate hidden states of shape *(src_len, batch, embed_dim)*. Only populated if *output_hidden_states:* is True. - **all_attentions** (`Tuple(torch.FloatTensor`)): Attention weights for each layer. During training might not be of length n_layers because of layer dropout. 
""" # check attention mask and invert if attention_mask is not None: attention_mask = invert_mask(attention_mask) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_ids) elif inputs_embeds is not None: inputs_embeds = inputs_embeds * self.embed_scale # We assume zeros hidden states correspond to padding tokens # and create `position_ids` where inputs_embeds[:, :, 0] == 0 position_ids = inputs_embeds[:, :, 0].masked_fill( inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx ) embed_pos = self.embed_positions(position_ids) else: raise ValueError("You have to specify either input_ids or inputs_embeds") x = inputs_embeds + embed_pos x = nn.functional.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: x = x.transpose(0, 1) # T x B x C -> B x T x C encoder_states += (x,) x = x.transpose(0, 1) # B x T x C -> T x B x C # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) if self.training and (dropout_probability < self.layerdrop): # skip the layer attn = None else: x, attn = encoder_layer( x, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) if output_attentions: all_attentions = all_attentions + (attn,) # T x B x C -> B x T x C x = x.transpose(0, 1) if output_hidden_states: encoder_states += (x,) if not return_dict: return tuple(v for v in [x, encoder_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions) class DecoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward( self, x, encoder_hidden_states, encoder_attn_mask=None, layer_state=None, causal_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, decoder_padding_mask=None, output_attentions=False, ): residual = x if layer_state is None: layer_state = {} # Self Attention x, self_attn_weights = self.self_attn( query=x, key=x, layer_state=layer_state, # adds keys to layer state key_padding_mask=decoder_padding_mask, attn_mask=causal_mask, 
layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) # Cross attention residual = x assert self.encoder_attn.cache_key != self.self_attn.cache_key x, cross_attn_weights = self.encoder_attn( query=x, key=encoder_hidden_states, key_padding_mask=encoder_attn_mask, layer_state=layer_state, # mutates layer state layer_head_mask=cross_attn_layer_head_mask, output_attentions=output_attentions, ) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.encoder_attn_layer_norm(x) # Fully Connected residual = x x = self.activation_fn(self.fc1(x)) x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return ( x, self_attn_weights, layer_state, cross_attn_weights, ) # layer_state = cache for decoding class FSMTDecoder(nn.Module): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`] Args: config: FSMTConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding): super().__init__() self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) # type: List[DecoderLayer] if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None): embed_tokens_weight_shape = self.embed_tokens.weight.shape else: embed_tokens_weight_shape = self.embed_tokens.weight.shape self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False) self.output_projection.weight = self.embed_tokens.weight def forward( self, input_ids: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_padding_mask: torch.Tensor, decoder_padding_mask: torch.Tensor, decoder_causal_mask: torch.Tensor, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): """ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`): previous decoder outputs for teacher forcing encoder_hidden_states: output from the encoder, used for encoder-side attention encoder_padding_mask: for ignoring pad tokens past_key_values (dict or None): dictionary used for storing state during generation head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Returns: BaseModelOutputWithPast or tuple: - the decoder's features of shape *(batch, tgt_len, embed_dim)* - the cache - hidden states - attentions """ # check attention mask and invert if encoder_padding_mask is not None: encoder_padding_mask = invert_mask(encoder_padding_mask) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: # embed positions positions = self.embed_positions(input_ids) if use_cache: input_ids = input_ids[:, -1:] positions = positions[:, -1:] # happens after we embed them x = self.embed_tokens(input_ids) * self.embed_scale elif inputs_embeds is not None: # We assume zeros hidden states correspond to padding tokens # and create `position_ids` where inputs_embeds[:, :, 0] == 0 position_ids = inputs_embeds[:, :, 0].masked_fill( inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx ) positions = self.embed_positions(position_ids) x = inputs_embeds * self.embed_scale else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") x += positions x = nn.functional.dropout(x, p=self.dropout, training=self.training) # Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions else None next_decoder_cache = [] # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), ( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: x = x.transpose(0, 1) all_hidden_states += (x,) x = x.transpose(0, 1) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_state = past_key_values[idx] if past_key_values is not None else None x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer( x, encoder_hidden_states, encoder_attn_mask=encoder_padding_mask, decoder_padding_mask=decoder_padding_mask, layer_state=layer_state, causal_mask=decoder_causal_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), output_attentions=output_attentions, ) if use_cache: next_decoder_cache.append(layer_past.copy()) if output_attentions: all_self_attns += (layer_self_attn,) all_cross_attns += (layer_cross_attn,) # add hidden states from the last decoder layer if output_hidden_states: x = x.transpose(0, 1) all_hidden_states += (x,) x = x.transpose(0, 1) # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) x = self.output_projection(x) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns, ) def _reorder_buffer(attn_cache, new_order): for k, input_buffer_k in attn_cache.items(): if input_buffer_k is not None: attn_cache[k] = input_buffer_k.index_select(0, new_order) return attn_cache class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim, num_heads, dropout=0.0, bias=True, encoder_decoder_attention=False, # otherwise self_attention ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim**-0.5 self.encoder_decoder_attention = encoder_decoder_attention self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self" def _shape(self, tensor, seq_len, bsz): return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) def forward( self, query, key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, layer_state: Optional[Dict[str, Optional[Tensor]]] = None, attn_mask: Optional[Tensor] = None, layer_head_mask: Optional[Tensor] = None, output_attentions=False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time(SeqLen) x Batch x Channel""" static_kv: bool = self.encoder_decoder_attention tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] # get here for encoder decoder cause of static_kv if layer_state is not None: # reuse k,v and encoder_padding_mask 
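            # `layer_state` keeps one entry per attention type under `self.cache_key` ("self" or "encoder_decoder");
            # each entry stores `prev_key`/`prev_value` of shape (bsz, num_heads, seq_len, head_dim) and an optional
            # `prev_key_padding_mask`. For cross-attention (static_kv) the cached projections of the encoder output
            # are reused unchanged at every decoding step.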
saved_state = layer_state.get(self.cache_key, {}) if "prev_key" in saved_state and static_kv: # previous time steps are cached - no need to recompute key and value if they are static key = None else: saved_state = None layer_state = {} q = self.q_proj(query) * self.scaling if static_kv: if key is None: k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: k = self.k_proj(query) v = self.v_proj(query) q = self._shape(q, tgt_len, bsz) if k is not None: k = self._shape(k, -1, bsz) if v is not None: v = self._shape(v, -1, bsz) if saved_state is not None: k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz) # Update cache layer_state[self.cache_key] = { "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim), "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim), "prev_key_padding_mask": key_padding_mask if not static_kv else None, } assert k is not None src_len = k.size(1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # This is part of a workaround to get around fork/join parallelism not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None assert key_padding_mask is None or key_padding_mask.size()[:2] == ( bsz, src_len, ) if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2) attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # make sure that attn_weights are included in graph attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training, ) assert v is not None attn_output = torch.bmm(attn_probs, v) assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz): # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) 
if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) assert k is not None and v is not None prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None) if prev_key_padding_mask is not None: if static_kv: new_key_padding_mask = prev_key_padding_mask else: new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1) else: new_key_padding_mask = key_padding_mask return k, v, new_key_padding_mask def fill_with_neg_inf(t): """FP16-compatible function that fills a input_ids with -inf.""" return t.float().fill_(torch.finfo(t.dtype).min).type_as(t) # Public API def _get_shape(t): return getattr(t, "shape", None) @add_start_docstrings( "The bare FSMT Model outputting raw hidden-states without any specific head on top.", FSMT_START_DOCSTRING, ) class FSMTModel(PretrainedFSMTModel): _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"] def __init__(self, config: FSMTConfig): super().__init__(config) padding_idx = config.pad_token_id encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx) decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx) self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings()) self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings()) @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: if decoder_input_ids is None: use_cache = False output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # make masks if user doesn't supply if not use_cache and input_ids is not None: decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs( self.config, input_ids, decoder_input_ids=decoder_input_ids, decoder_padding_mask=decoder_attention_mask, causal_mask_dtype=self.decoder.embed_tokens.weight.dtype, ) else: decoder_padding_mask, causal_mask = None, None if 
decoder_input_ids is None and decoder_inputs_embeds is None: raise ValueError("Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.") if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) decoder_outputs = self.decoder( decoder_input_ids, encoder_outputs[0], attention_mask, decoder_padding_mask, decoder_causal_mask=causal_mask, inputs_embeds=decoder_inputs_embeds, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.encoder.embed_tokens def set_input_embeddings(self, value): self.encoder.embed_tokens = value def get_output_embeddings(self): return self.decoder.embed_tokens def set_output_embeddings(self, value): self.decoder.embed_tokens = value @add_start_docstrings( "The FSMT Model with a language modeling head. 
Can be used for translation.", FSMT_START_DOCSTRING
)
class FSMTForConditionalGeneration(PretrainedFSMTModel):
    base_model_prefix = "model"
    _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]

    def __init__(self, config: FSMTConfig):
        super().__init__(config)
        base_model = FSMTModel(config)
        self.model = base_model

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(FSMT_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            use_cache = False

        outputs = self.model(
            input_ids,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_inputs_embeds=decoder_inputs_embeds,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = outputs[0]

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # TODO(SS): do we need to ignore pad tokens in labels?
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        return {
            "input_ids": None,  # encoder_outputs is defined.
input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = [] for layer_past in past_key_values: # get the correct batch idx from decoder layer's batch dim for cross and self-attn layer_past_new = { attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() } reordered_past.append(layer_past_new) return reordered_past def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def get_output_embeddings(self): return self.model.decoder.embed_tokens def set_output_embeddings(self, value): self.model.decoder.embed_tokens = value class SinusoidalPositionalEmbedding(nn.Embedding): """ This module produces sinusoidal positional embeddings of any length. We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge. Padding symbols are ignored. These embeddings get automatically extended in forward if more positions is needed. """ def __init__(self, num_positions, embedding_dim, padding_idx): self.make_weight(num_positions, embedding_dim, padding_idx) def make_weight(self, num_positions, embedding_dim, padding_idx): weight = self.get_embedding(num_positions, embedding_dim, padding_idx) if not hasattr(self, "weight"): # in ___init__ super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight) else: # in forward put the weights on the correct dtype and device of the param weight = weight.to(dtype=self.weight.dtype, device=self.weight.device) self.weight = nn.Parameter(weight) self.weight.detach_() self.weight.requires_grad = False @staticmethod def get_embedding(num_embeddings, embedding_dim, padding_idx): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb @staticmethod def make_positions(tensor, padding_idx: int): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. 
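        # cumsum over the non-pad mask numbers the real tokens 1..N per row; multiplying by the mask zeroes the pad
        # slots again, and adding padding_idx shifts real positions to start at padding_idx + 1 while pad positions
        # stay at padding_idx.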
mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = input.shape[:2] max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weight.size(0): # expand embeddings if needed self.make_weight(max_pos, self.embedding_dim, self.padding_idx) positions = self.make_positions(input, self.padding_idx) return super().forward(positions)
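# A minimal usage sketch, added for illustration and not part of the original module: it shows what
# `shift_tokens_right` produces for a toy batch, assuming a toy vocabulary where 1 is the pad token id
# and 2 is the eos token id.
if __name__ == "__main__":
    _labels = torch.tensor([[5, 6, 7, 2], [8, 9, 2, 1]])
    _decoder_input_ids = shift_tokens_right(_labels.clone(), pad_token_id=1)
    # each row now starts with its final non-pad token (the eos token), followed by the labels shifted right by one
    assert _decoder_input_ids.tolist() == [[2, 5, 6, 7], [2, 8, 9, 2]]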
transformers/src/transformers/models/fsmt/modeling_fsmt.py/0
{ "file_path": "transformers/src/transformers/models/fsmt/modeling_fsmt.py", "repo_id": "transformers", "token_count": 25668 }
321
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Gemma model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class GemmaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate a Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Gemma-7B.
    e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`GemmaModel`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details check out
            [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The legacy activation function. It is overwritten by the `hidden_activation`.
        hidden_activation (`str` or `function`, *optional*):
            The non-linear activation function (function or string) in the decoder. Will default to
            `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"`
            activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import GemmaModel, GemmaConfig
    >>> # Initializing a Gemma gemma-7b style configuration
    >>> configuration = GemmaConfig()
    >>> # Initializing a model from the gemma-7b style configuration
    >>> model = GemmaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=3072,
        intermediate_size=24576,
        num_hidden_layers=28,
        num_attention_heads=16,
        num_key_value_heads=16,
        head_dim=256,
        hidden_act="gelu_pytorch_tanh",
        hidden_activation=None,
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.hidden_activation = hidden_activation
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
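# Illustrative sketch, not part of the original file: the number of query heads that share one key/value
# head is num_attention_heads // num_key_value_heads, so the default 16/16 split above makes grouped-query
# attention behave like plain multi-head attention (num_key_value_heads=1 would give multi-query attention).
if __name__ == "__main__":
    _config = GemmaConfig()
    _queries_per_kv_head = _config.num_attention_heads // _config.num_key_value_heads
    assert _queries_per_kv_head == 1  # 16 query heads / 16 key-value heads -> MHA behaviour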
transformers/src/transformers/models/gemma/configuration_gemma.py/0
{ "file_path": "transformers/src/transformers/models/gemma/configuration_gemma.py", "repo_id": "transformers", "token_count": 2711 }
322
# coding=utf-8 # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GLPN model.""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_glpn import GLPNConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "GLPNConfig" # Base docstring _CHECKPOINT_FOR_DOC = "vinvino02/glpn-kitti" _EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20] GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = [ "vinvino02/glpn-kitti", # See all GLPN models at https://huggingface.co/models?filter=glpn ] # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
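    Concretely, each sample in the batch is zeroed with probability `drop_prob` and the surviving samples are rescaled
    by `1 / (1 - drop_prob)`, so the expected value of the output matches the input.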
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.segformer.modeling_segformer.SegformerDropPath class GLPNDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) # Copied from transformers.models.segformer.modeling_segformer.SegformerOverlapPatchEmbeddings class GLPNOverlapPatchEmbeddings(nn.Module): """Construct the overlapping patch embeddings.""" def __init__(self, patch_size, stride, num_channels, hidden_size): super().__init__() self.proj = nn.Conv2d( num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2, ) self.layer_norm = nn.LayerNorm(hidden_size) def forward(self, pixel_values): embeddings = self.proj(pixel_values) _, _, height, width = embeddings.shape # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels) # this can be fed to a Transformer layer embeddings = embeddings.flatten(2).transpose(1, 2) embeddings = self.layer_norm(embeddings) return embeddings, height, width # Copied from transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention class GLPNEfficientSelfAttention(nn.Module): """SegFormer's efficient self-attention mechanism. 
Employs the sequence reduction process introduced in the [PvT paper](https://arxiv.org/abs/2102.12122).""" def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio): super().__init__() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size) self.key = nn.Linear(self.hidden_size, self.all_head_size) self.value = nn.Linear(self.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.sr_ratio = sequence_reduction_ratio if sequence_reduction_ratio > 1: self.sr = nn.Conv2d( hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio ) self.layer_norm = nn.LayerNorm(hidden_size) def transpose_for_scores(self, hidden_states): new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward( self, hidden_states, height, width, output_attentions=False, ): query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sr_ratio > 1: batch_size, seq_len, num_channels = hidden_states.shape # Reshape to (batch_size, num_channels, height, width) hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Apply sequence reduction hidden_states = self.sr(hidden_states) # Reshape back to (batch_size, seq_len, num_channels) hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.segformer.modeling_segformer.SegformerSelfOutput class GLPNSelfOutput(nn.Module): def __init__(self, config, hidden_size): super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.segformer.modeling_segformer.SegformerAttention with Segformer->GLPN class GLPNAttention(nn.Module): def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio): super().__init__() self.self = GLPNEfficientSelfAttention( config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, ) self.output = GLPNSelfOutput(config, hidden_size=hidden_size) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, height, width, output_attentions=False): self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.segformer.modeling_segformer.SegformerDWConv class GLPNDWConv(nn.Module): def __init__(self, dim=768): super().__init__() self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) def forward(self, hidden_states, height, width): batch_size, seq_len, num_channels = hidden_states.shape hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width) hidden_states = self.dwconv(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) return hidden_states # Copied from transformers.models.segformer.modeling_segformer.SegformerMixFFN with Segformer->GLPN class GLPNMixFFN(nn.Module): def __init__(self, config, in_features, hidden_features=None, out_features=None): super().__init__() out_features = out_features or in_features self.dense1 = nn.Linear(in_features, hidden_features) self.dwconv = GLPNDWConv(hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) def 
forward(self, hidden_states, height, width): hidden_states = self.dense1(hidden_states) hidden_states = self.dwconv(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.segformer.modeling_segformer.SegformerLayer with Segformer->GLPN class GLPNLayer(nn.Module): """This corresponds to the Block class in the original implementation.""" def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio): super().__init__() self.layer_norm_1 = nn.LayerNorm(hidden_size) self.attention = GLPNAttention( config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, ) self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states, height, width, output_attentions=False): self_attention_outputs = self.attention( self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention height, width, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection (with stochastic depth) attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) # second residual connection (with stochastic depth) mlp_output = self.drop_path(mlp_output) layer_output = mlp_output + hidden_states outputs = (layer_output,) + outputs return outputs class GLPNEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( GLPNOverlapPatchEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) ) self.patch_embeddings = nn.ModuleList(embeddings) # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( GLPNLayer( config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=dpr[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i], ) ) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) # Layer norms self.layer_norm = nn.ModuleList( [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)] ) def forward( self, pixel_values, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] hidden_states = pixel_values for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)): embedding_layer, block_layer, norm_layer 
= x # first, obtain patch embeddings hidden_states, height, width = embedding_layer(hidden_states) # second, send embeddings through blocks for i, blk in enumerate(block_layer): layer_outputs = blk(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # third, apply layer norm hidden_states = norm_layer(hidden_states) # fourth, optionally reshape back to (batch_size, num_channels, height, width) hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class GLPNPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GLPNConfig base_model_prefix = "glpn" main_input_name = "pixel_values" # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GLPN_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GLPNConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GLPN_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.", GLPN_START_DOCSTRING, ) class GLPNModel(GLPNPreTrainedModel): # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN def __init__(self, config): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = GLPNEncoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class GLPNSelectiveFeatureFusion(nn.Module): """ Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This module adaptively selects and integrates local and global features by attaining an attention map for each feature. 
""" def __init__(self, in_channel=64): super().__init__() self.convolutional_layer1 = nn.Sequential( nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(in_channel), nn.ReLU(), ) self.convolutional_layer2 = nn.Sequential( nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(int(in_channel / 2)), nn.ReLU(), ) self.convolutional_layer3 = nn.Conv2d( in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1 ) self.sigmoid = nn.Sigmoid() def forward(self, local_features, global_features): # concatenate features along the channel dimension features = torch.cat((local_features, global_features), dim=1) # pass through convolutional layers features = self.convolutional_layer1(features) features = self.convolutional_layer2(features) features = self.convolutional_layer3(features) # apply sigmoid to get two-channel attention map attn = self.sigmoid(features) # construct hybrid features by adding element-wise hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[ :, 1, :, : ].unsqueeze(1) return hybrid_features class GLPNDecoderStage(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() should_skip = in_channels == out_channels self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity() self.fusion = GLPNSelectiveFeatureFusion(out_channels) self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) def forward(self, hidden_state, residual=None): hidden_state = self.convolution(hidden_state) if residual is not None: hidden_state = self.fusion(hidden_state, residual) hidden_state = self.upsample(hidden_state) return hidden_state hidden_state = self.upsample(hidden_state) return hidden_state class GLPNDecoder(nn.Module): def __init__(self, config): super().__init__() # we use features from end -> start reserved_hidden_sizes = config.hidden_sizes[::-1] out_channels = config.decoder_hidden_size self.stages = nn.ModuleList( [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reserved_hidden_sizes] ) # don't fuse in first stage self.stages[0].fusion = None self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]: stage_hidden_states = [] stage_hidden_state = None for hidden_state, stage in zip(hidden_states[::-1], self.stages): stage_hidden_state = stage(hidden_state, stage_hidden_state) stage_hidden_states.append(stage_hidden_state) stage_hidden_states[-1] = self.final_upsample(stage_hidden_state) return stage_hidden_states class SiLogLoss(nn.Module): r""" Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://arxiv.org/abs/1406.2283). $$L=\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{1}{2 n^{2}}\left(\sum_{i} d_{i}^{2}\right)$$ where $d_{i}=\log y_{i}-\log y_{i}^{*}$. 
""" def __init__(self, lambd=0.5): super().__init__() self.lambd = lambd def forward(self, pred, target): valid_mask = (target > 0).detach() diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask]) loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2)) return loss class GLPNDepthEstimationHead(nn.Module): def __init__(self, config): super().__init__() self.config = config channels = config.decoder_hidden_size self.head = nn.Sequential( nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=False), nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1), ) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: # use last features of the decoder hidden_states = hidden_states[self.config.head_in_index] hidden_states = self.head(hidden_states) predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @add_start_docstrings( """GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.""", GLPN_START_DOCSTRING, ) class GLPNForDepthEstimation(GLPNPreTrainedModel): def __init__(self, config): super().__init__(config) self.glpn = GLPNModel(config) self.decoder = GLPNDecoder(config) self.head = GLPNDepthEstimationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, labels: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]: r""" labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth depth estimation maps for computing the loss. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti") >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) ... predicted_depth = outputs.predicted_depth >>> # interpolate to original size >>> prediction = torch.nn.functional.interpolate( ... predicted_depth.unsqueeze(1), ... size=image.size[::-1], ... mode="bicubic", ... align_corners=False, ... 
) >>> # visualize the prediction >>> output = prediction.squeeze().cpu().numpy() >>> formatted = (output * 255 / np.max(output)).astype("uint8") >>> depth = Image.fromarray(formatted) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.glpn( pixel_values, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] out = self.decoder(hidden_states) predicted_depth = self.head(out) loss = None if labels is not None: loss_fct = SiLogLoss() loss = loss_fct(predicted_depth, labels) if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return ((loss,) + output) if loss is not None else output return DepthEstimatorOutput( loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
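# --- Illustrative sketch (added for exposition; not part of the upstream file) ---
# Minimal numerical check of the scale-invariant log loss implemented by `SiLogLoss` above.
# The tensor shapes and values below are made up; `lambd=0.5` matches the class default.
if __name__ == "__main__":
    _pred = torch.rand(2, 48, 64) + 0.1  # hypothetical predicted depth map, strictly positive
    _target = torch.rand(2, 48, 64) + 0.1  # hypothetical ground-truth depth map

    _loss = SiLogLoss(lambd=0.5)(_pred, _target)

    # closed form it should match: sqrt(mean(d^2) - lambd * mean(d)^2), with d = log(target) - log(pred)
    _d = torch.log(_target) - torch.log(_pred)
    _manual = torch.sqrt(torch.pow(_d, 2).mean() - 0.5 * torch.pow(_d.mean(), 2))
    print(_loss.item(), _manual.item())  # the two values agree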
transformers/src/transformers/models/glpn/modeling_glpn.py/0
{ "file_path": "transformers/src/transformers/models/glpn/modeling_glpn.py", "repo_id": "transformers", "token_count": 13224 }
323
# coding=utf-8 # Copyright 2021 The Eleuther AI and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert GPT Neo checkpoint.""" import argparse import json from transformers import GPTNeoConfig, GPTNeoForCausalLM, load_tf_weights_in_gpt_neo from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): # Initialise PyTorch model config_json = json.load(open(config_file, "r")) config = GPTNeoConfig( hidden_size=config_json["n_embd"], num_layers=config_json["n_layer"], num_heads=config_json["n_head"], attention_types=config_json["attention_types"], max_position_embeddings=config_json["n_positions"], resid_dropout=config_json["res_dropout"], embed_dropout=config_json["embed_dropout"], attention_dropout=config_json["attn_dropout"], ) print(f"Building PyTorch model from configuration: {config}") model = GPTNeoForCausalLM(config) # Load weights from tf checkpoint load_tf_weights_in_gpt_neo(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained mesh-tf model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
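# --- Usage note (added for exposition; not part of the upstream script) ---
# The script is normally driven through the CLI defined above; the paths below are placeholders
# and must point at a real Mesh-TF GPT Neo checkpoint and its config.json:
#
#   python convert_gpt_neo_mesh_tf_to_pytorch.py \
#       --tf_checkpoint_path /path/to/gpt_neo_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump_dir
#
# This is equivalent to calling the conversion function directly:
#
#   convert_tf_checkpoint_to_pytorch(
#       tf_checkpoint_path="/path/to/gpt_neo_checkpoint",
#       config_file="/path/to/config.json",
#       pytorch_dump_path="/path/to/pytorch_dump_dir",
#   )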
transformers/src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py", "repo_id": "transformers", "token_count": 957 }
324
# coding=utf-8 # Copyright 2021 The EleutherAI and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gptj import GPTJConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "gptj" _CONFIG_FOR_DOC = "GPTJConfig" GPTJ_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`GPTJConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ GPTJ_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def create_sinusoidal_positions(num_pos, dim): inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim)) sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32") sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp) sentinel = dim // 2 + dim % 2 out = np.zeros((num_pos, dim)) out[:, 0:sentinel] = sin out[:, sentinel:] = cos return jnp.array(out) def rotate_every_two(tensor): rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1) rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,)) return rotate_half_tensor def apply_rotary_pos_emb(tensor, sincos): sin_pos, cos_pos = sincos sin_pos = sin_pos[:, :, None, :].repeat(2, 3) cos_pos = cos_pos[:, :, None, :].repeat(2, 3) return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos) class FlaxGPTJAttention(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 causal: bool = True is_cross_attention: bool = False def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.rotary_dim = config.rotary_dim dense = partial( nn.Dense, self.embed_dim, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.resid_dropout = nn.Dropout(rate=config.resid_pdrop) self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") pos_embd_dim = self.rotary_dim or self.embed_dim self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, 
attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key # positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, position_ids, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) sincos = jnp.take(self.embed_positions, position_ids, axis=0) sincos = jnp.split(sincos, 2, axis=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, : self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim :] q_rot = query[:, :, :, : self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim :] k_rot = apply_rotary_pos_emb(k_rot, sincos) q_rot = apply_rotary_pos_emb(q_rot, sincos) key = jnp.concatenate([k_rot, k_pass], axis=-1) query = jnp.concatenate([q_rot, q_pass], axis=-1) else: key = apply_rotary_pos_emb(key, sincos) query = apply_rotary_pos_emb(query, sincos) query_length, key_length = query.shape[1], key.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attn_pdrop > 0.0: dropout_rng = self.make_rng("dropout") # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.has_variable("cache", "cached_key") or init_cache: key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) # transform boolean mask into float mask attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) # usual dot product attention attn_weights = dot_product_attention_weights( query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attn_pdrop, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGPTJMLP(nn.Module): config: GPTJConfig intermediate_size: int dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) self.act = ACT2FN[self.config.activation_function] self.dropout = nn.Dropout(rate=self.config.resid_pdrop) def __call__(self, hidden_states, deterministic: bool = True): hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc_out(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxGPTJBlock(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype) self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) # residual connection hidden_states = attn_output + feed_forward_hidden_states + residual return (hidden_states,) + attn_outputs[1:] class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = GPTJConfig base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: GPTJConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return init_variables["cache"] @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING) def __call__( self, input_ids, attention_mask=None, position_ids=None, params: dict = None, past_key_values: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTJAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] return outputs class FlaxGPTJBlockCollection(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.blocks = [ FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for block in self.blocks: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = block( hidden_states, attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) # this contains possible `None` values - `FlaxGPTJModule` will 
filter them out outputs = (hidden_states, all_hidden_states, all_attentions) return outputs class FlaxGPTJModule(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size self.wte = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.dropout = nn.Dropout(rate=self.config.embd_pdrop) self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype) self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, deterministic=True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): input_embeds = self.wte(input_ids.astype("i4")) hidden_states = self.dropout(input_embeds, deterministic=deterministic) outputs = self.h( hidden_states, attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1], ) @add_start_docstrings( "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.", GPTJ_START_DOCSTRING, ) class FlaxGPTJModel(FlaxGPTJPreTrainedModel): module_class = FlaxGPTJModule append_call_sample_docstring( FlaxGPTJModel, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC, ) class FlaxGPTJForCausalLMModule(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.config.vocab_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) def __call__( self, input_ids, attention_mask, position_ids, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): outputs = self.transformer( input_ids, attention_mask, position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings( """ The GPTJ Model transformer with a language modeling head on top. 
""", GPTJ_START_DOCSTRING, ) class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel): module_class = FlaxGPTJForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since GPTJ uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring( FlaxGPTJForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC, )
transformers/src/transformers/models/gptj/modeling_flax_gptj.py/0
{ "file_path": "transformers/src/transformers/models/gptj/modeling_flax_gptj.py", "repo_id": "transformers", "token_count": 12460 }
325
# coding=utf-8 # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GroupViT model.""" import collections.abc import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "nvidia/groupvit-gcc-yfcc", # See all GroupViT models at https://huggingface.co/models?filter=groupvit ] # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: torch.Tensor, dim: int): y_soft = logits.softmax(dim) # Straight through. index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft return ret def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor: # more stable https://github.com/pytorch/pytorch/issues/41663 gumbel_dist = torch.distributions.gumbel.Gumbel( torch.tensor(0.0, device=logits.device, dtype=logits.dtype), torch.tensor(1.0, device=logits.device, dtype=logits.dtype), ) gumbels = gumbel_dist.sample(logits.shape) gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) y_soft = gumbels.softmax(dim) if hard: # Straight through. index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: # Reparametrization trick. 
ret = y_soft return ret def resize_attention_map(attentions, height, width, align_corners=False): """ Args: attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. Returns: `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width] """ scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = attentions.shape[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = attentions.shape[2] // feat_height batch_size = attentions.shape[0] groups = attentions.shape[1] # number of group token # [batch_size, groups, height*width, groups] -> [batch_size, groups, height, width] attentions = attentions.reshape(batch_size, groups, feat_height, feat_width) attentions = nn.functional.interpolate( attentions, size=(height, width), mode="bilinear", align_corners=align_corners ) return attentions def get_grouping_from_attentions(attentions, hw_shape): """ Args: attentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer` hw_shape (`tuple(int)`): height and width of the output attention map Returns: `torch.Tensor`: the attention map of shape [batch_size, groups, height, width] """ attn_maps = [] with torch.no_grad(): prev_attn_masks = None for attn_masks in attentions: # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups] attn_masks = attn_masks.permute(0, 2, 1).contiguous() if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = prev_attn_masks @ attn_masks # [batch_size, heightxwidth, num_groups] -> [batch_size, num_groups, heightxwidth] -> [batch_size, num_groups, height, width] cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape) attn_maps.append(cur_attn_map) # [batch_size, num_groups, height, width] final_grouping = attn_maps[-1] return final_grouping class GroupViTCrossAttentionLayer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.attn = GroupViTAttention(config) self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, query, key): x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x class GroupViTAssignAttention(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.scale = config.hidden_size**-0.5 self.q_proj = nn.Linear(config.hidden_size, config.hidden_size) self.k_proj = nn.Linear(config.hidden_size, config.hidden_size) self.v_proj = nn.Linear(config.hidden_size, config.hidden_size) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.assign_eps = config.assign_eps def get_attn(self, attn, gumbel=True, hard=True): if gumbel and self.training: attn = gumbel_softmax(attn, dim=-2, hard=hard) else: if hard: attn = hard_softmax(attn, dim=-2) else: attn = nn.functional.softmax(attn, dim=-2) return attn def forward(self, query, key): value = key # [batch_size, query_length, channels] query = self.q_proj(query) # [batch_size, key_length, channels] key = self.k_proj(key) # [batch_size, key_length, channels] value = 
self.v_proj(value) # [batch_size, query_length, key_length] raw_attn = (query @ key.transpose(-2, -1)) * self.scale attn = self.get_attn(raw_attn) soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False) attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps) out = attn @ value out = self.proj(out) return out, soft_attn class GroupViTTokenAssign(nn.Module): def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group): super().__init__() self.num_output_group = num_output_group # norm on group_tokens self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) assign_mlp_ratio = ( config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) ) tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group) self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # norm on x self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_assign_attn = GroupViTCrossAttentionLayer(config) self.assign = GroupViTAssignAttention(config) self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size) def project_group_token(self, group_tokens): """ Args: group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels] """ # [B, num_output_groups, C] <- [B, num_group_tokens, C] projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def forward(self, image_tokens, group_tokens): """ Args: image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels] group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels] """ group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) # [batch_size, num_output_groups, channels] projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return new_image_tokens, attention @dataclass class GroupViTModelOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. 
This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`GroupViTVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None segmentation_logits: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class GroupViTPatchEmbeddings(nn.Module): """ Image to Patch Embedding. """ def __init__( self, image_size: int = 224, patch_size: Union[int, Tuple[int, int]] = 16, num_channels: int = 3, embed_dim: int = 768, ): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class GroupViTVisionEmbeddings(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.patch_embeddings = GroupViTPatchEmbeddings( image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size, ) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size)) self.dropout = nn.Dropout(config.dropout) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ npatch = embeddings.shape[1] if npatch == self.position_embeddings.shape[1] and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings num_original_pos_embed = patch_pos_embed.shape[1] dim = embeddings.shape[-1] feat_height = height // self.config.patch_size feat_width = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 feat_height, feat_width = feat_height + 0.1, feat_width + 0.1 original_height = original_width = math.sqrt(num_original_pos_embed) reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute( 0, 3, 1, 2 ) scale_factor = (feat_height / original_height, feat_width / original_width) patch_pos_embed = nn.functional.interpolate( reshaped_patch_pos_embed, scale_factor=scale_factor, mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) batch_size, seq_len, _ = embeddings.size() # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT class GroupViTTextEmbeddings(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class GroupViTStage(nn.Module): """This corresponds to the `GroupingLayer` class in the GroupViT implementation.""" def __init__( self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int, ): super().__init__() self.depth = depth self.num_group_token = num_group_token if num_group_token > 0: self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size)) else: self.group_token = None self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)]) if num_group_token > 0: self.downsample = GroupViTTokenAssign( config=config, 
num_group_token=num_group_token, num_output_group=num_output_group, ) else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = nn.Sequential( nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps), GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token), ) else: self.group_projector = None @property def with_group_token(self): return self.group_token is not None def split_x(self, x): if self.with_group_token: return x[:, : -self.num_group_token], x[:, -self.num_group_token :] else: return x, None def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor: if group_token is None: return x return torch.cat([x, group_token], dim=1) def forward( self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the grouping tensors of Grouping block. """ if self.with_group_token: group_token = self.group_token.expand(hidden_states.size(0), -1, -1) if self.group_projector is not None: group_token = group_token + self.group_projector(prev_group_token) else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None) cat_x = layer_out[0] x, group_token = self.split_x(cat_x) attention = None if self.downsample is not None: x, attention = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class GroupViTMLP(nn.Module): def __init__( self, config: GroupViTVisionConfig, hidden_size: Optional[int] = None, intermediate_size: Optional[int] = None, output_size: Optional[int] = None, ): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = nn.Linear(hidden_size, intermediate_size) self.fc2 = nn.Linear(intermediate_size, output_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class GroupViTMixerMLP(GroupViTMLP): def forward(self, x): x = super().forward(x.transpose(1, 2)) return x.transpose(1, 2) class GroupViTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() is_cross_attention = encoder_hidden_states is not None # get query proj query_states = self.q_proj(hidden_states) * self.scale if is_cross_attention: key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz) value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GroupViT class GroupViTEncoderLayer(nn.Module): def __init__(self, config: GroupViTConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = GroupViTAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class GroupViTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = GroupViTConfig base_model_prefix = "groupvit" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" init_range = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=init_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) factor = self.config.initializer_factor if isinstance(module, GroupViTTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, GroupViTAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, GroupViTMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) GROUPVIT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GROUPVIT_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ GROUPVIT_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ GROUPVIT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" class GroupViTVisionEncoder(nn.Module): def __init__(self, config: GroupViTVisionConfig) -> None: super().__init__() self.config = config self.stages = nn.ModuleList( [ GroupViTStage( config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0, ) for i in range(len(config.depths)) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = () if output_hidden_states else None all_groupings = () if output_attentions else None group_tokens = None for i, stage in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = stage(hidden_states, group_tokens, output_attentions) hidden_states = layer_outputs[0] group_tokens = layer_outputs[1] if output_attentions and layer_outputs[2] is not None: all_groupings = all_groupings + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings ) class GroupViTTextEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a [`GroupViTEncoderLayer`]. Args: config: GroupViTTextConfig """ def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT class GroupViTTextTransformer(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTTextEmbeddings(config) self.encoder = GroupViTTextEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # For `pooled_output` computation self.eos_token_id = config.eos_token_id @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) # CLIP's text model uses causal mask, prepare it here. 
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added # ------------------------------------------------------------ # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1), ] else: # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible) pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`) (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id) .int() .argmax(dim=-1), ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class GroupViTTextModel(GroupViTPreTrainedModel): config_class = GroupViTTextConfig def __init__(self, config: GroupViTTextConfig): super().__init__(config) self.text_model = GroupViTTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import CLIPTokenizer, GroupViTTextModel >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = 
outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class GroupViTVisionTransformer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTVisionEmbeddings(config) self.encoder = GroupViTVisionEncoder(config) self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder( hidden_states=hidden_states, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # normalize the last hidden state last_hidden_state = self.layernorm(last_hidden_state) pooled_output = last_hidden_state.mean(dim=1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class GroupViTVisionModel(GroupViTPreTrainedModel): config_class = GroupViTVisionConfig main_input_name = "pixel_values" def __init__(self, config: GroupViTVisionConfig): super().__init__(config) self.vision_model = GroupViTVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> GroupViTPatchEmbeddings: return self.vision_model.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTVisionModel >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = 
outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(GROUPVIT_START_DOCSTRING) class GroupViTModel(GroupViTPreTrainedModel): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig): super().__init__(config) if not isinstance(config.text_config, GroupViTTextConfig): raise ValueError( "config.text_config is expected to be of type GroupViTTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, GroupViTVisionConfig): raise ValueError( "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.projection_intermediate_dim = config.projection_intermediate_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = GroupViTTextTransformer(text_config) self.vision_model = GroupViTVisionTransformer(vision_config) self.visual_projection = nn.Sequential( nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True), ) self.text_projection = nn.Sequential( nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True), ) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`GroupViTTextModel`]. Examples: ```python >>> from transformers import CLIPTokenizer, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`GroupViTVisionModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_segmentation: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, GroupViTModelOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GroupViTModel >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... 
) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_segmentation = ( output_segmentation if output_segmentation is not None else self.config.output_segmentation ) if output_segmentation: output_attentions = True output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() seg_logits = None if output_segmentation: # grouped features # [batch_size_image, num_group, hidden_size] image_group_embeds = vision_outputs[0] # [batch_size_image*num_group, hidden_size] image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1])) if output_hidden_states: attentions = vision_outputs[3] else: attentions = vision_outputs[2] # [batch_size_image, num_group, height, width] grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:]) # normalized features image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True) # [batch_size_image x num_group, batch_size_text] logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale # [batch_size_image, batch_size_text, num_group] logits_per_image_group = logits_per_image_group.reshape( image_embeds.shape[0], -1, text_embeds.shape[0] ).permute(0, 2, 1) # [batch_size_image, batch_size_text, height x width] flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1) # [batch_size_image, batch_size_text, height, width] seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale seg_logits = seg_logits.reshape( seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3] ) loss = None if return_loss: loss = groupvit_loss(logits_per_text) if not return_dict: if seg_logits is not None: output = ( logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs, ) else: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return GroupViTModelOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, 
segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
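The `GroupViTModel.forward` above only returns `segmentation_logits` when `output_segmentation=True`, which in turn forces `output_attentions=True` so the grouping maps needed for per-pixel logits are available. Below is a minimal usage sketch based on the forward signature and the checkpoint name used in the docstrings (`nvidia/groupvit-gcc-yfcc`); the post-processing at the end is illustrative, not part of the model:

```python
from PIL import Image
import requests
import torch
from transformers import AutoProcessor, GroupViTModel

processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of a cat", "a photo of a dog"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    # output_segmentation=True also enables output_attentions internally,
    # since the grouping attentions are needed to build the segmentation logits
    outputs = model(**inputs, output_segmentation=True)

# [num_images, num_texts] image-text similarity scores
probs = outputs.logits_per_image.softmax(dim=1)

# [num_images, num_texts, height, width] per-prompt segmentation logits
seg_logits = outputs.segmentation_logits
# illustrative post-processing: pick the best-matching prompt for every pixel
pred_mask = seg_logits.argmax(dim=1)
```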
# Source file: transformers/src/transformers/models/groupvit/modeling_groupvit.py
# coding=utf-8 # Copyright 2010, The Microsoft Research Asia LayoutLM Team authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LayoutLM model configuration""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PretrainedConfig, PreTrainedTokenizer from ...onnx import OnnxConfig, PatchingSpec from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/layoutlm-base-uncased": ( "https://huggingface.co/microsoft/layoutlm-base-uncased/resolve/main/config.json" ), "microsoft/layoutlm-large-uncased": ( "https://huggingface.co/microsoft/layoutlm-large-uncased/resolve/main/config.json" ), } class LayoutLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLM [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) architecture. Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the documentation from [`BertConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the LayoutLM model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`LayoutLMModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed into [`LayoutLMModel`]. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): The value used to pad input_ids. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. max_2d_position_embeddings (`int`, *optional*, defaults to 1024): The maximum value that the 2D position embedding might ever used. Typically set this to something large just in case (e.g., 1024). Examples: ```python >>> from transformers import LayoutLMConfig, LayoutLMModel >>> # Initializing a LayoutLM configuration >>> configuration = LayoutLMConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = LayoutLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "layoutlm" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, max_2d_position_embeddings=1024, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.max_2d_position_embeddings = max_2d_position_embeddings class LayoutLMOnnxConfig(OnnxConfig): def __init__( self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, ): super().__init__(config, task=task, patching_specs=patching_specs) self.max_2d_positions = config.max_2d_position_embeddings - 1 @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("token_type_ids", {0: "batch", 1: "sequence"}), ] ) def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: """ 
Generate inputs to provide to the ONNX exporter for the specific framework Args: tokenizer: The tokenizer associated with this model configuration batch_size: The batch size (int) to export the model for (-1 means dynamic axis) seq_length: The sequence length (int) to export the model for (-1 means dynamic axis) is_pair: Indicate if the input is a pair (sentence 1, sentence 2) framework: The framework (optional) the tokenizer will generate tensor for Returns: Mapping[str, Tensor] holding the kwargs to provide to the model's forward function """ input_dict = super().generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) # Generate a dummy bbox box = [48, 84, 73, 128] if not framework == TensorType.PYTORCH: raise NotImplementedError("Exporting LayoutLM to ONNX is currently only supported for PyTorch.") if not is_torch_available(): raise ValueError("Cannot generate dummy inputs without PyTorch installed.") import torch batch_size, seq_length = input_dict["input_ids"].shape input_dict["bbox"] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1) return input_dict
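`LayoutLMOnnxConfig.generate_dummy_inputs` extends the base ONNX dummy inputs with a `bbox` tensor by tiling a single hard-coded box over the sequence. The following is a small sketch of how it might be called, assuming PyTorch is installed and the `microsoft/layoutlm-base-uncased` tokenizer is available; the exact dummy shapes depend on the defaults chosen by the base `OnnxConfig`:

```python
from transformers import LayoutLMConfig, LayoutLMTokenizer
from transformers.models.layoutlm.configuration_layoutlm import LayoutLMOnnxConfig
from transformers.utils import TensorType

config = LayoutLMConfig()
onnx_config = LayoutLMOnnxConfig(config)

tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")

# Only the PyTorch framework is supported here; anything else raises NotImplementedError.
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)

print(sorted(dummy_inputs.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'token_type_ids']
print(dummy_inputs["bbox"].shape)   # (batch, seq_len, 4): the same dummy box repeated per token
```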
# Source file: transformers/src/transformers/models/layoutlm/configuration_layoutlm.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for LayoutLMv3.""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract logger = logging.get_logger(__name__) def normalize_box(box, width, height): return [ int(1000 * (box[0] / width)), int(1000 * (box[1] / height)), int(1000 * (box[2] / width)), int(1000 * (box[3] / height)), ] def apply_tesseract( image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str], input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR pil_image = to_pil_image(image, input_data_format=input_data_format) image_width, image_height = pil_image.size data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes" return words, normalized_boxes class LayoutLMv3ImageProcessor(BaseImageProcessor): r""" Constructs a LayoutLMv3 image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be overridden by `do_resize` in `preprocess`. 
size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after resizing. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image's pixel values by the specified `rescale_value`. Can be overridden by `do_rescale` in `preprocess`. rescale_factor (`float`, *optional*, defaults to 1 / 255): Value by which the image's pixel values are rescaled. Can be overridden by `rescale_factor` in `preprocess`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. apply_ocr (`bool`, *optional*, defaults to `True`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by the `apply_ocr` parameter in the `preprocess` method. ocr_lang (`str`, *optional*): The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method. tesseract_config (`str`, *optional*): Any additional custom configuration flags that are forwarded to the `config` parameter when calling Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_value self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.apply_ocr = apply_ocr self.ocr_lang = ocr_lang self.tesseract_config = tesseract_config self._valid_processor_keys = [ "images", "do_resize", "size", "resample", "do_rescale", "rescale_factor", "do_normalize", "image_mean", "image_std", "apply_ocr", "ocr_lang", "tesseract_config", "return_tensors", "data_format", "input_data_format", ] # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Desired size of the output image after applying `resize`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values between [0, 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to apply to the image pixel values. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `Iterable[float]`, *optional*, defaults to `self.image_mean`): Mean values to be used for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `Iterable[float]`, *optional*, defaults to `self.image_std`): Standard deviation values to be used for normalization. Only has an effect if `do_normalize` is set to `True`. apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`): The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used. tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`): Any additional custom configuration flags that are forwarded to the `config` parameter when calling Tesseract. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config images = make_list_of_images(images) validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self, "pytesseract") words_batch = [] boxes_batch = [] for image in images: words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format) words_batch.append(words) boxes_batch.append(boxes) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) if apply_ocr: data["words"] = words_batch data["boxes"] = boxes_batch return data
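A short usage sketch of the image processor above. `document.png` is a placeholder path, and the OCR branch assumes `pytesseract` plus a system Tesseract install, since `apply_ocr` defaults to `True`:

```python
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image = Image.open("document.png").convert("RGB")  # placeholder document image

# Default settings: resize to 224x224, rescale, normalize, and run Tesseract OCR.
processor = LayoutLMv3ImageProcessor()
encoding = processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
print(encoding["words"][0][:5])        # first recognized words
print(encoding["boxes"][0][:5])        # boxes normalized to the 0-1000 range

# If words and boxes come from another OCR engine, turn the built-in OCR off.
processor_no_ocr = LayoutLMv3ImageProcessor(apply_ocr=False)
pixel_only = processor_no_ocr(image, return_tensors="pt")
```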
transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py", "repo_id": "transformers", "token_count": 7927 }
328
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the # Lxmert Authors. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 LXMERT model.""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, shape_list, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased" _CONFIG_FOR_DOC = "LxmertConfig" TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "unc-nlp/lxmert-base-uncased", ] @dataclass class TFLxmertModelOutput(ModelOutput): """ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder") Args: language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the language encoder. vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the visual encoder. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: tf.Tensor | None = None vision_output: tf.Tensor | None = None pooled_output: tf.Tensor | None = None language_hidden_states: Tuple[tf.Tensor] | None = None vision_hidden_states: Tuple[tf.Tensor] | None = None language_attentions: Tuple[tf.Tensor] | None = None vision_attentions: Tuple[tf.Tensor] | None = None cross_encoder_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFLxmertForPreTrainingOutput(ModelOutput): """ Output type of [`LxmertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None prediction_logits: tf.Tensor | None = None cross_relationship_score: tf.Tensor | None = None question_answering_score: tf.Tensor | None = None language_hidden_states: Tuple[tf.Tensor] | None = None vision_hidden_states: Tuple[tf.Tensor] | None = None language_attentions: Tuple[tf.Tensor] | None = None vision_attentions: Tuple[tf.Tensor] | None = None cross_encoder_attentions: Tuple[tf.Tensor] | None = None class TFLxmertVisualFeatureEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) # Object feature encoding self.visn_fc = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="visn_fc", ) self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm") # Box position encoding self.box_fc = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="box_fc", ) self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.feat_dim = config.visual_feat_dim self.pos_dim = config.visual_pos_dim self.config = config def call(self, visn_input, training=False): feats, boxes = visn_input x = self.visn_fc(feats) x = self.visn_layer_norm(x) y = self.box_fc(boxes) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output, training=training) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visn_fc", None) is not None: with tf.name_scope(self.visn_fc.name): self.visn_fc.build([None, None, self.feat_dim]) if getattr(self, "visn_layer_norm", None) is not None: with tf.name_scope(self.visn_layer_norm.name): self.visn_layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "box_fc", None) is not None: with tf.name_scope(self.box_fc.name): self.box_fc.build([None, None, self.pos_dim]) if getattr(self, "box_layer_norm", None) is not None: with tf.name_scope(self.box_layer_norm.name): self.box_layer_norm.build([None, None, self.config.hidden_size]) class TFLxmertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], 
initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFLxmertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.ctx_dim = config.hidden_size self.config = config def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, context, attention_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
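        # Scaled dot-product attention: softmax(Q @ K^T / sqrt(d_k) + additive mask) @ V, computed per head below.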
attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFLxmertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.ctx_dim]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.ctx_dim]) class TFLxmertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFLxmertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLxmertAttentionOutput(keras.layers.Layer): def __init__(self, config, **kwargs): 
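        # Output projection back to hidden_size, then dropout and a residual "add & norm" over the attention input.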
super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLxmertSelfAttentionLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFLxmertAttention(config, name="self") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call(self, input_tensor, attention_mask, output_attentions, training=False): # Self attention attends to itself, thus keys and queries are the same (input_tensor). self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions) if output_attentions: attention_probs = self_output[1] attention_output = self.attention_output(self_output[0], input_tensor) return (attention_output, attention_probs) if output_attentions else (attention_output,) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "attention_output", None) is not None: with tf.name_scope(self.attention_output.name): self.attention_output.build(None) class TFLxmertCrossAttentionLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.att = TFLxmertAttention(config, name="att") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call( self, input_tensor, ctx_tensor, ctx_att_mask, output_attentions=False, training=False, ): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) if output_attentions: attention_probs = output[1] attention_output = self.attention_output(output[0], input_tensor, training=training) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "att", None) is not None: with tf.name_scope(self.att.name): self.att.build(None) if getattr(self, "attention_output", None) is not None: with tf.name_scope(self.attention_output.name): self.attention_output.build(None) class TFLxmertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFLxmertSelfAttentionLayer(config, name="attention") self.intermediate = TFLxmertIntermediate(config, name="intermediate") self.transformer_output = TFLxmertOutput(config, name="output") def call(self, hidden_states, attention_mask, output_attentions, training=False): attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) attention_output = attention_outputs[0] intermediate_output = 
self.intermediate(attention_output) layer_output = self.transformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "transformer_output", None) is not None: with tf.name_scope(self.transformer_output.name): self.transformer_output.build(None) class TFLxmertXLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") # Self-attention Layers self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") # Intermediate and Output Layers (FFNs) self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") self.lang_output = TFLxmertOutput(config, name="lang_output") self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") self.visn_output = TFLxmertOutput(config, name="visn_output") def cross_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, output_attentions, training=False, ): # Cross Attention # Keras saving and loading model *does not work* with the same inputs for two layers. lang_attention_lang_input = tf.identity(lang_input) visn_attention_lang_input = tf.identity(lang_input) lang_attention_visn_input = tf.identity(visn_input) visn_attention_visn_input = tf.identity(visn_input) lang_att_output = self.visual_attention( lang_attention_lang_input, lang_attention_visn_input, visn_attention_mask, output_attentions=output_attentions, training=training, ) visn_att_output = self.visual_attention( visn_attention_visn_input, visn_attention_lang_input, lang_attention_mask, output_attentions=output_attentions, training=training, ) return lang_att_output, visn_att_output def self_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, training=False, ): # Self Attention output_attentions = False lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) return lang_att_output[0], visn_att_output[0] def output_fc(self, lang_input, visn_input, training=False): # FC layers lang_inter_output = self.lang_inter(lang_input) visn_inter_output = self.visn_inter(visn_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input, training) visn_output = self.visn_output(visn_inter_output, visn_input, training) return lang_output, visn_output def call( self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask, output_attentions, training=False, ): lang_att_output = lang_feats visn_att_output = visn_feats lang_att_output, visn_att_output = self.cross_att( lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask, output_attentions, training=training, ) attention_probs = lang_att_output[1:] lang_att_output, visn_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visn_att_output[0], visn_attention_mask, training=training, ) lang_output, visn_output = self.output_fc(lang_att_output, 
visn_att_output, training=training) return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visual_attention", None) is not None: with tf.name_scope(self.visual_attention.name): self.visual_attention.build(None) if getattr(self, "lang_self_att", None) is not None: with tf.name_scope(self.lang_self_att.name): self.lang_self_att.build(None) if getattr(self, "visn_self_att", None) is not None: with tf.name_scope(self.visn_self_att.name): self.visn_self_att.build(None) if getattr(self, "lang_inter", None) is not None: with tf.name_scope(self.lang_inter.name): self.lang_inter.build(None) if getattr(self, "lang_output", None) is not None: with tf.name_scope(self.lang_output.name): self.lang_output.build(None) if getattr(self, "visn_inter", None) is not None: with tf.name_scope(self.visn_inter.name): self.visn_inter.build(None) if getattr(self, "visn_output", None) is not None: with tf.name_scope(self.visn_output.name): self.visn_output.build(None) class TFLxmertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)] self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)] self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)] self.config = config def call( self, lang_feats=None, lang_attention_mask=None, visual_feats=None, visual_pos=None, visual_attention_mask=None, output_attentions=None, training=False, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module( visual_feats, visual_attention_mask, output_attentions, training=training, ) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions, training=training, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( 
vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visn_fc", None) is not None: with tf.name_scope(self.visn_fc.name): self.visn_fc.build(None) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "x_layers", None) is not None: for layer in self.x_layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "r_layers", None) is not None: for layer in self.r_layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLxmertMainLayer(keras.layers.Layer): config_class = LxmertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFLxmertEmbeddings(config, name="embeddings") self.encoder = TFLxmertEncoder(config, name="encoder") self.pooler = TFLxmertPooler(config, name="pooler") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_pos is None or visual_feats is None: raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # Positional Word Embeddings embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if visual_attention_mask is not None: extended_visual_attention_mask = tf.reshape(visual_attention_mask, (input_shape[0], 1, 1, input_shape[1])) extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1) extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype) extended_visual_attention_mask = tf.multiply( tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst ) else: extended_visual_attention_mask = None # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats, visual_pos, extended_visual_attention_mask, output_attentions, training, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return TFLxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFLxmertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig base_model_prefix = "lxmert" @property def dummy_inputs(self): """ Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) return { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, } @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"), "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"), "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction. This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LxmertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model) These are currently not provided by the transformers library. visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to 1. These are currently not provided by the transformers library. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): MMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class TFLxmertModel(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.lxmert = TFLxmertMainLayer(config, name="lxmert") @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, visual_feats: tf.Tensor | None = None, visual_pos: tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, visual_attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFLxmertModelOutput]: outputs = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lxmert", None) is not None: with tf.name_scope(self.lxmert.name): self.lxmert.build(None) class TFLxmertPooler(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert class TFLxmertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: LxmertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert class TFLxmertLMPredictionHead(keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert class TFLxmertMLMHead(keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) class TFLxmertPreTrainingHeads(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") self.seq_relationship = keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship", ) self.config = config def call(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, "seq_relationship", None) is not None: with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build([None, None, self.config.hidden_size]) class TFLxmertVisualAnswerHead(keras.layers.Layer): def __init__(self, config, num_labels, **kwargs): super().__init__(**kwargs) hid_dim = config.hidden_size self.dense = keras.layers.Dense( hid_dim * 2, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._0", ) self.activation = get_tf_activation("gelu") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2") self.dense_1 = keras.layers.Dense( num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._3", ) self.hid_dim = hid_dim def call(self, hidden_states): 
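        # Two-layer MLP over the pooled output: hidden_size -> 2*hidden_size with GELU + LayerNorm -> num_labels logits.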
hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense_1(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.hid_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, self.hid_dim * 2]) if getattr(self, "dense_1", None) is not None: with tf.name_scope(self.dense_1.name): self.dense_1.build([None, None, self.hid_dim * 2]) class TFLxmertVisualObjHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # Decide the use of visual losses visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim} self.visual_losses = visual_losses # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder_dict = { key: keras.layers.Dense( self.visual_losses[key]["num"], kernel_initializer=get_initializer(config.initializer_range), name=f"decoder_dict.{key}", ) for key in self.visual_losses } self.config = config def call(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) if getattr(self, "decoder_dict", None) is not None: for layer in self.decoder_dict.values(): with tf.name_scope(layer.name): layer.build([None, None, self.config.hidden_size]) @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING) class TFLxmertForPreTraining(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pretraining tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = TFLxmertMainLayer(config, name="lxmert") # Pre-training heads self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") if self.task_obj_predict: self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") if self.task_qa: self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") # Loss functions self.loss_fcts = { "l2": keras.losses.Huber(delta=1.0, name="huber_loss"), "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visn_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num": 
config.num_attr_labels, "loss": "visn_ce", } if config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) if self.config.task_obj_predict: obj_labels = {} if self.config.visual_attr_loss and self.config.task_obj_predict: obj_labels["attr"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_feat_loss and self.config.task_obj_predict: obj_labels["feat"] = ( tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_obj_loss and self.config.task_obj_predict: obj_labels["obj"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) return { **{ "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, }, **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), } def get_lm_head(self): return self.cls.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, visual_feats: tf.Tensor | None = None, visual_pos: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, visual_attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, masked_lm_labels: tf.Tensor | None = None, obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None, matched_label: tf.Tensor | None = None, ans: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput: r""" masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` obj_labels (`Dict[Str: Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`): each key is named after each one of the visual losses and each element of the tuple is of the shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and the label score respectively matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the whether or not the text input matches the image (classification) loss. 
Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. ans (`tf.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`): a one hot representation hof the correct answer *optional* Returns: """ lxmert_output = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) else tf.constant(0.0) ) losses = () if masked_lm_labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( tf.reshape(masked_lm_labels, [-1]), tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), ) total_loss += masked_lm_loss losses += (masked_lm_loss,) if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"]( tf.reshape(matched_label, [-1]), tf.reshape(cross_relationship_score, [-1, 2]), ) total_loss += matched_loss losses += (matched_loss,) if obj_labels is not None and self.task_obj_predict: total_visn_loss = 0.0 visn_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visn_loss_fct = self.loss_fcts[loss_fct_name] visn_prediction_scores = visn_prediction_scores_dict[key] visn_loss = visn_loss_fct( tf.reshape(label, label_shape), tf.reshape(visn_prediction_scores, [-1, output_dim]), ) if visn_loss.ndim > 1: # Regression Losses visn_loss = tf.reduce_mean(visn_loss) visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight total_visn_loss += visn_loss losses += (visn_loss,) total_loss += total_visn_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"]( tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) ) # exclude "*2" here to match the effect of QA losses. # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. 
(Used 10 instead of 6 in EMNLP paper) # Now : (loss *1) for 12 epochs # # * 2 # Multiply by 2 because > half of the data will not have label total_loss += answer_loss losses += (answer_loss,) # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach() if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return TFLxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lxmert", None) is not None: with tf.name_scope(self.lxmert.name): self.lxmert.build(None) if getattr(self, "cls", None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None) if getattr(self, "obj_predict_head", None) is not None: with tf.name_scope(self.obj_predict_head.name): self.obj_predict_head.build(None) if getattr(self, "answer_head", None) is not None: with tf.name_scope(self.answer_head.name): self.answer_head.build(None)
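# --- Illustrative usage sketch (not part of the original file) -------------------------------
# A minimal, hedged example of how the pre-training inputs above fit together: `masked_lm_labels`
# activates the masked-LM term of `total_loss` and `matched_label` the cross-modality matching
# term. The reduced config values, token ids and random visual features below are arbitrary
# placeholders chosen only to keep the sketch small; they are not values from any real checkpoint.
if __name__ == "__main__":
    import tensorflow as tf

    from transformers import LxmertConfig, TFLxmertForPreTraining

    config = LxmertConfig(
        vocab_size=100,
        hidden_size=64,
        num_attention_heads=4,
        intermediate_size=128,
        l_layers=2,
        x_layers=1,
        r_layers=1,
        visual_feat_dim=16,
        num_qa_labels=4,
    )
    model = TFLxmertForPreTraining(config)  # randomly initialized, nothing is downloaded

    batch_size, num_visual_features = 2, 10
    outputs = model(
        input_ids=tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32),
        visual_feats=tf.random.uniform((batch_size, num_visual_features, config.visual_feat_dim)),
        visual_pos=tf.random.uniform((batch_size, num_visual_features, 4)),
        masked_lm_labels=tf.constant([[7, 5, 9], [2, 8, 4]], dtype=tf.int32),
        matched_label=tf.constant([1, 0], dtype=tf.int32),
    )
    # pre-training loss and language prediction logits of shape (batch_size, seq_len, vocab_size)
    print(outputs.loss, outputs.prediction_logits.shape)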
transformers/src/transformers/models/lxmert/modeling_tf_lxmert.py/0
{ "file_path": "transformers/src/transformers/models/lxmert/modeling_tf_lxmert.py", "repo_id": "transformers", "token_count": 31854 }
329
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc.and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MaskFormer model configuration""" from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/maskformer-swin-base-ade": ( "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } logger = logging.get_logger(__name__) class MaskFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MaskFormerModel`]. It is used to instantiate a MaskFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MaskFormer [facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) architecture trained on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Currently, MaskFormer only supports the [Swin Transformer](swin) as backbone. Args: mask_feature_size (`int`, *optional*, defaults to 256): The masks' features size, this value will also be used to specify the Feature Pyramid Network features' size. no_object_weight (`float`, *optional*, defaults to 0.1): Weight to apply to the null (no object) class. use_auxiliary_loss(`bool`, *optional*, defaults to `False`): If `True` [`MaskFormerForInstanceSegmentationOutput`] will contain the auxiliary losses computed using the logits from each decoder's stage. backbone_config (`Dict`, *optional*): The configuration passed to the backbone, if unset, the configuration corresponding to `swin-base-patch4-window12-384` will be used. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. 
decoder_config (`Dict`, *optional*): The configuration passed to the transformer decoder model, if unset the base config for `detr-resnet-50` will be used. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. dice_weight (`float`, *optional*, defaults to 1.0): The weight for the dice loss. cross_entropy_weight (`float`, *optional*, defaults to 1.0): The weight for the cross entropy loss. mask_weight (`float`, *optional*, defaults to 20.0): The weight for the mask loss. output_auxiliary_logits (`bool`, *optional*): Should the model output its `auxiliary_logits` or not. Raises: `ValueError`: Raised if the backbone model type selected is not in `["swin"]` or the decoder model type selected is not in `["detr"]` Examples: ```python >>> from transformers import MaskFormerConfig, MaskFormerModel >>> # Initializing a MaskFormer facebook/maskformer-swin-base-ade configuration >>> configuration = MaskFormerConfig() >>> # Initializing a model (with random weights) from the facebook/maskformer-swin-base-ade style configuration >>> model = MaskFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "maskformer" attribute_map = {"hidden_size": "mask_feature_size"} backbones_supported = ["resnet", "swin"] decoders_supported = ["detr"] def __init__( self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, backbone: Optional[str] = None, use_pretrained_backbone: bool = False, use_timm_backbone: bool = False, backbone_kwargs: Optional[Dict] = None, **kwargs, ): if use_pretrained_backbone: raise ValueError("Pretrained backbones are not supported yet.") if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") if backbone_config is None and backbone is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k backbone_config = SwinConfig( image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"], ) if isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) # verify that the backbone is supported if backbone_config is not None and backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
" f"Supported model types: {','.join(self.backbones_supported)}" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 decoder_config = DetrConfig() else: # verify that the decoder is supported decoder_type = ( decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"Transformer Decoder {decoder_type} not supported, please use one of" f" {','.join(self.decoders_supported)}" ) if isinstance(decoder_config, dict): config_class = CONFIG_MAPPING[decoder_type] decoder_config = config_class.from_dict(decoder_config) self.backbone_config = backbone_config self.decoder_config = decoder_config # main feature dimension for the model self.fpn_feature_size = fpn_feature_size self.mask_feature_size = mask_feature_size # initializer self.init_std = init_std self.init_xavier_std = init_xavier_std # Hungarian matcher && loss self.cross_entropy_weight = cross_entropy_weight self.dice_weight = dice_weight self.mask_weight = mask_weight self.use_auxiliary_loss = use_auxiliary_loss self.no_object_weight = no_object_weight self.output_auxiliary_logits = output_auxiliary_logits self.num_attention_heads = self.decoder_config.encoder_attention_heads self.num_hidden_layers = self.decoder_config.num_hidden_layers self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs super().__init__(**kwargs) @classmethod def from_backbone_and_decoder_configs( cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs ): """Instantiate a [`MaskFormerConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. decoder_config ([`PretrainedConfig`]): The transformer decoder configuration to use. Returns: [`MaskFormerConfig`]: An instance of a configuration object """ return cls( backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )
transformers/src/transformers/models/maskformer/configuration_maskformer.py/0
{ "file_path": "transformers/src/transformers/models/maskformer/configuration_maskformer.py", "repo_id": "transformers", "token_count": 4190 }
330
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: MBartTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip class MBartTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import MBartTokenizerFast >>> tokenizer = MBartTokenizerFast.from_pretrained( ... "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO" ... 
) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt") ```""" vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = MBartTokenizer prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=_additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self.lang_code_to_id = { lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES } self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang) self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(src_lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. 
No prefix and suffix=[eos, tgt_lang_code].""" self.cur_lang_code = self.convert_tokens_to_ids(lang) self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens) suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens) self._tokenizer.post_processor = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
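# --- Illustrative usage sketch (not part of the original file) -------------------------------
# A hedged example of the special-token layout produced by `set_src_lang_special_tokens` /
# `set_tgt_lang_special_tokens`: no prefix, and a suffix of [eos, language code]. Loading the
# checkpoint below downloads the pretrained tokenizer files, and the `text_target` keyword
# assumes a recent version of `transformers`.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )

    src_text = "UN Chief Says There Is No Military Solution in Syria"
    tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
    batch = tokenizer(src_text, text_target=tgt_text)

    # Source side: "<tokens> </s> en_XX" -> the last two tokens are eos and the source language code.
    print(tokenizer.convert_ids_to_tokens(batch["input_ids"])[-2:])  # expected: ['</s>', 'en_XX']
    # Target side: "<tokens> </s> ro_RO" -> same layout with the target language code.
    print(tokenizer.convert_ids_to_tokens(batch["labels"])[-2:])  # expected: ['</s>', 'ro_RO']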
transformers/src/transformers/models/mbart/tokenization_mbart_fast.py/0
{ "file_path": "transformers/src/transformers/models/mbart/tokenization_mbart_fast.py", "repo_id": "transformers", "token_count": 5169 }
331
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MGP-STR model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json", } class MgpstrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`MgpstrModel`]. It is used to instantiate an MGP-STR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MGP-STR [alibaba-damo/mgp-str-base](https://huggingface.co/alibaba-damo/mgp-str-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`List[int]`, *optional*, defaults to `[32, 128]`): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. max_token_length (`int`, *optional*, defaults to 27): The max number of output tokens. num_character_labels (`int`, *optional*, defaults to 38): The number of classes for character head . num_bpe_labels (`int`, *optional*, defaults to 50257): The number of classes for bpe head . num_wordpiece_labels (`int`, *optional*, defaults to 30522): The number of classes for wordpiece head . hidden_size (`int`, *optional*, defaults to 768): The embedding dimension. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of mlp hidden dim to embedding dim. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. distilled (`bool`, *optional*, defaults to `False`): Model includes a distillation token and head as in DeiT models. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. drop_rate (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder. attn_drop_rate (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.0): The stochastic depth rate. output_a3_attentions (`bool`, *optional*, defaults to `False`): Whether or not the model should returns A^3 module attentions. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import MgpstrConfig, MgpstrForSceneTextRecognition >>> # Initializing a Mgpstr mgp-str-base style configuration >>> configuration = MgpstrConfig() >>> # Initializing a model (with random weights) from the mgp-str-base style configuration >>> model = MgpstrForSceneTextRecognition(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mgp-str" def __init__( self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.max_token_length = max_token_length self.num_character_labels = num_character_labels self.num_bpe_labels = num_bpe_labels self.num_wordpiece_labels = num_wordpiece_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.distilled = distilled self.layer_norm_eps = layer_norm_eps self.drop_rate = drop_rate self.qkv_bias = qkv_bias self.attn_drop_rate = attn_drop_rate self.drop_path_rate = drop_path_rate self.output_a3_attentions = output_a3_attentions self.initializer_range = initializer_range
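# --- Illustrative usage sketch (not part of the original file) -------------------------------
# A small example of the fields that drive the three recognition heads (character / bpe /
# wordpiece): each head predicts `max_token_length` positions over its own label space. The
# reduced values passed to the second config are arbitrary placeholders.
if __name__ == "__main__":
    from transformers import MgpstrConfig

    config = MgpstrConfig()
    print(config.max_token_length, config.num_character_labels)  # 27 38
    print(config.num_bpe_labels, config.num_wordpiece_labels)  # 50257 30522

    small_config = MgpstrConfig(hidden_size=192, num_hidden_layers=2, num_attention_heads=3)
    print(small_config.hidden_size, small_config.num_hidden_layers)  # 192 2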
transformers/src/transformers/models/mgp_str/configuration_mgp_str.py/0
{ "file_path": "transformers/src/transformers/models/mgp_str/configuration_mgp_str.py", "repo_id": "transformers", "token_count": 2316 }
332
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MPNet model.""" import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mpnet import MPNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/mpnet-base" _CONFIG_FOR_DOC = "MPNetConfig" MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/mpnet-base", ] class MPNetPreTrainedModel(PreTrainedModel): config_class = MPNetConfig pretrained_model_archive_map = MPNET_PRETRAINED_MODEL_ARCHIVE_LIST base_model_prefix = "mpnet" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class MPNetEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.padding_idx = 1 self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx) self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) 
position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class MPNetSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.q = nn.Linear(config.hidden_size, self.all_head_size) self.k = nn.Linear(config.hidden_size, self.all_head_size) self.v = nn.Linear(config.hidden_size, self.all_head_size) self.o = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): q = self.q(hidden_states) k = self.k(hidden_states) v = self.v(hidden_states) q = self.transpose_for_scores(q) k = self.transpose_for_scores(k) v = self.transpose_for_scores(v) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(q, k.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply relative position embedding (precomputed in MPNetEncoder) if provided. if position_bias is not None: attention_scores += position_bias if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask c = torch.matmul(attention_probs, v) c = c.permute(0, 2, 1, 3).contiguous() new_c_shape = c.size()[:-2] + (self.all_head_size,) c = c.view(*new_c_shape) o = self.o(c) outputs = (o, attention_probs) if output_attentions else (o,) return outputs class MPNetAttention(nn.Module): def __init__(self, config): super().__init__() self.attn = MPNetSelfAttention(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads ) self.attn.q = prune_linear_layer(self.attn.q, index) self.attn.k = prune_linear_layer(self.attn.k, index) self.attn.v = prune_linear_layer(self.attn.v, index) self.attn.o = prune_linear_layer(self.attn.o, index, dim=1) self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads) self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_outputs = self.attn( hidden_states, attention_mask, head_mask, position_bias, output_attentions=output_attentions, ) attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class MPNetIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class MPNetOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MPNetLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = MPNetAttention(config) self.intermediate = MPNetIntermediate(config) self.output = MPNetOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, position_bias=None, output_attentions=False, **kwargs, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, position_bias=position_bias, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = 
self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class MPNetEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.n_heads = config.num_attention_heads self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)]) self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, **kwargs, ): position_bias = self.compute_position_bias(hidden_states) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], position_bias, output_attentions=output_attentions, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, ) def compute_position_bias(self, x, position_ids=None, num_buckets=32): bsz, qlen, klen = x.size(0), x.size(1), x.size(1) if position_ids is not None: context_position = position_ids[:, :, None] memory_position = position_ids[:, None, :] else: context_position = torch.arange(qlen, dtype=torch.long)[:, None] memory_position = torch.arange(klen, dtype=torch.long)[None, :] relative_position = memory_position - context_position rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets) rp_bucket = rp_bucket.to(x.device) values = self.relative_attention_bias(rp_bucket) values = values.permute([2, 0, 1]).unsqueeze(0) values = values.expand((bsz, -1, qlen, klen)).contiguous() return values @staticmethod def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): ret = 0 n = -relative_position num_buckets //= 2 ret += (n < 0).to(torch.long) * num_buckets n = torch.abs(n) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret # Copied from transformers.models.bert.modeling_bert.BertPooler class MPNetPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output MPNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MPNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MPNET_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.", MPNET_START_DOCSTRING, ) class MPNetModel(MPNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MPNetEmbeddings(config) self.encoder = MPNetEncoder(config) self.pooler = MPNetPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class MPNetForMaskedLM(MPNetPreTrainedModel): _tied_weights_keys = ["lm_head.decoder"] def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config, add_pooling_layer=False) self.lm_head = MPNetLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: 
Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetLMHead(nn.Module): """MPNet Head for masked and permuted language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x @add_start_docstrings( """ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForSequenceClassification(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.classifier = MPNetClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForMultipleChoice(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mpnet = MPNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mpnet( flat_input_ids, position_ids=flat_position_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MPNET_START_DOCSTRING, ) class MPNetForTokenClassification(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MPNetClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to BERT's [CLS] token) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MPNET_START_DOCSTRING, ) class MPNetForQuestionAnswering(MPNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mpnet = MPNetModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mpnet( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. 
    Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
    `utils.make_positions`.

    :param torch.Tensor input_ids: sequence of token ids, where padding tokens equal `padding_idx`
    :param int padding_idx: the id of the padding token
    :return torch.Tensor: position ids with the same shape as `input_ids`
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx
transformers/src/transformers/models/mpnet/modeling_mpnet.py/0
{ "file_path": "transformers/src/transformers/models/mpnet/modeling_mpnet.py", "repo_id": "transformers", "token_count": 18262 }
333
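The MPNet modeling file above ends with the `create_position_ids_from_input_ids` helper. A minimal worked sketch of what it computes (the tensor values below are illustrative, not from the source): padding positions keep `padding_idx`, while real tokens receive consecutive positions starting at `padding_idx + 1`.

```python
# Standalone sketch of the position-id computation, assuming torch is installed.
import torch

padding_idx = 1
input_ids = torch.tensor([[5, 7, 9, 1, 1]])  # two trailing padding tokens

mask = input_ids.ne(padding_idx).int()                                  # [1, 1, 1, 0, 0]
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask    # [1, 2, 3, 0, 0]
position_ids = incremental_indices.long() + padding_idx

print(position_ids)  # tensor([[2, 3, 4, 1, 1]])
```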
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_musicgen": [ "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "MusicgenConfig", "MusicgenDecoderConfig", ], "processing_musicgen": ["MusicgenProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_musicgen"] = [ "MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST", "MusicgenForConditionalGeneration", "MusicgenForCausalLM", "MusicgenModel", "MusicgenPreTrainedModel", ] if TYPE_CHECKING: from .configuration_musicgen import ( MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, MusicgenConfig, MusicgenDecoderConfig, ) from .processing_musicgen import MusicgenProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_musicgen import ( MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST, MusicgenForCausalLM, MusicgenForConditionalGeneration, MusicgenModel, MusicgenPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/musicgen/__init__.py/0
{ "file_path": "transformers/src/transformers/models/musicgen/__init__.py", "repo_id": "transformers", "token_count": 799 }
334
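The Musicgen `__init__.py` above follows the library's lazy-import pattern: configuration and processing symbols are always registered in `_import_structure`, while the torch-backed modeling classes are added only when torch is available. A minimal sketch of how a consumer sees this (assuming `transformers` is installed, and torch for the modeling class):

```python
# Attribute access on the lazy module triggers the real import via _LazyModule.__getattr__.
import transformers.models.musicgen as musicgen

print(musicgen.MusicgenProcessor)                 # processing class, importable without torch
print(musicgen.MusicgenForConditionalGeneration)  # resolves only when torch is installed
```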
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_nat"] = [ "NAT_PRETRAINED_MODEL_ARCHIVE_LIST", "NatForImageClassification", "NatModel", "NatPreTrainedModel", "NatBackbone", ] if TYPE_CHECKING: from .configuration_nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nat import ( NAT_PRETRAINED_MODEL_ARCHIVE_LIST, NatBackbone, NatForImageClassification, NatModel, NatPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/nat/__init__.py/0
{ "file_path": "transformers/src/transformers/models/nat/__init__.py", "repo_id": "transformers", "token_count": 657 }
335
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Nougat. """ from typing import Dict, List, Optional, Union from transformers.tokenization_utils_base import PreTokenizedInput, TextInput, TruncationStrategy from ...processing_utils import ProcessorMixin from ...utils import PaddingStrategy, TensorType class NougatProcessor(ProcessorMixin): r""" Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor. [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information. Args: image_processor ([`NougatImageProcessor`]): An instance of [`NougatImageProcessor`]. The image processor is a required input. tokenizer ([`NougatTokenizerFast`]): An instance of [`NougatTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__( self, images=None, text=None, do_crop_margin: bool = None, do_resize: bool = None, size: Dict[str, int] = None, resample: "PILImageResampling" = None, # noqa: F821 do_thumbnail: bool = None, do_align_long_axis: bool = None, do_pad: bool = None, do_rescale: bool = None, rescale_factor: Union[int, float] = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821 input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821 text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] ] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: inputs = self.image_processor( images, do_crop_margin=do_crop_margin, do_resize=do_resize, size=size, resample=resample, 
do_thumbnail=do_thumbnail, do_align_long_axis=do_align_long_axis, do_pad=do_pad, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, return_tensors=return_tensors, data_format=data_format, input_data_format=input_data_format, ) if text is not None: encodings = self.tokenizer( text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, ) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def post_process_generation(self, *args, **kwargs): """ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.post_process_generation(*args, **kwargs)
transformers/src/transformers/models/nougat/processing_nougat.py/0
{ "file_path": "transformers/src/transformers/models/nougat/processing_nougat.py", "repo_id": "transformers", "token_count": 2932 }
336
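A minimal usage sketch for the `NougatProcessor` above. The checkpoint name `facebook/nougat-base`, the synthetic page image, and the `# Title` target text are assumptions for illustration; as in the `__call__` logic above, when both `images` and `text` are passed, the tokenized text is attached to the image features under the `labels` key.

```python
# Hypothetical end-to-end call, assuming torch, Pillow, and the facebook/nougat-base checkpoint.
import numpy as np
from PIL import Image
from transformers import NougatProcessor

processor = NougatProcessor.from_pretrained("facebook/nougat-base")
page = Image.fromarray(np.random.randint(0, 255, (896, 672, 3), dtype=np.uint8))  # stand-in for a scanned page

inputs = processor(images=page, text="# Title", return_tensors="pt")
print(inputs["pixel_values"].shape)  # prepared by the image processor
print(inputs["labels"].shape)        # tokenizer output attached as labels
```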
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OpenAI GPT model.""" from __future__ import annotations from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFConv1D, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_openai import OpenAIGPTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "openai-community/openai-gpt" _CONFIG_FOR_DOC = "OpenAIGPTConfig" TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "openai-community/openai-gpt", # See all OpenAI GPT models at https://huggingface.co/models?filter=openai-community/openai-gpt ] class TFAttention(keras.layers.Layer): def __init__(self, nx, config, scale=False, **kwargs): super().__init__(**kwargs) n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implementation] assert ( n_state % config.n_head == 0 ), f"Hidden dimension {n_state} not dividable by number of heads {config.n_head}" self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = config.output_attentions self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn") self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj") self.attn_dropout = keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = keras.layers.Dropout(config.resid_pdrop) self.n_state = n_state self.pruned_heads = set() def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns): """ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. """ i = tf.range(nd)[:, None] j = tf.range(ns) m = i >= j - ns + nd return m def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False): # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. 
_, _, nd, ns = shape_list(w) b = tf.cast(self.causal_attention_mask(nd, ns), dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask attention_mask = tf.cast(attention_mask, dtype=w.dtype) w = w + attention_mask w = stable_softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call(self, x, attention_mask, head_mask, output_attentions, training=False): x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a] + attn_outputs[1:] return outputs # a, (attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "c_attn", None) is not None: with tf.name_scope(self.c_attn.name): self.c_attn.build([None, None, self.n_state * 3]) if getattr(self, "c_proj", None) is not None: with tf.name_scope(self.c_proj.name): self.c_proj.build([None, None, self.n_state]) class TFMLP(keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc") self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj") self.act = get_tf_activation("gelu") self.dropout = keras.layers.Dropout(config.resid_pdrop) self.nx = nx self.n_state = n_state def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "c_fc", None) is not None: with tf.name_scope(self.c_fc.name): self.c_fc.build([None, None, self.n_state]) if getattr(self, "c_proj", None) is not None: with tf.name_scope(self.c_proj.name): self.c_proj.build([None, None, self.nx]) class TFBlock(keras.layers.Layer): def __init__(self, config, scale=False, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.attn = TFAttention(nx, config, scale, name="attn") self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") self.mlp = TFMLP(4 * nx, config, name="mlp") self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2") self.nx = nx def call(self, x, attention_mask, head_mask, output_attentions, training=False): output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training) a = output_attn[0] # output_attn: a, (attentions) n = self.ln_1(x + a) m = self.mlp(n, training=training) h = self.ln_2(n + m) outputs = [h] + output_attn[1:] return outputs # x, (attentions) def build(self, input_shape=None): if self.built: return self.built = True if 
getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "ln_1", None) is not None: with tf.name_scope(self.ln_1.name): self.ln_1.build([None, None, self.nx]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "ln_2", None) is not None: with tf.name_scope(self.ln_2.name): self.ln_2.build([None, None, self.nx]) @keras_serializable class TFOpenAIGPTMainLayer(keras.layers.Layer): config_class = OpenAIGPTConfig def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.tokens_embed = TFSharedEmbeddings( config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed" ) self.drop = keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)] def build(self, input_shape=None): with tf.name_scope("positions_embed"): self.positions_embed = self.add_weight( name="embeddings", shape=[self.n_positions, self.n_embd], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "tokens_embed", None) is not None: with tf.name_scope(self.tokens_embed.name): self.tokens_embed.build(None) if getattr(self, "h", None) is not None: for layer in self.h: with tf.name_scope(layer.name): layer.build(None) def get_input_embeddings(self): return self.tokens_embed def set_input_embeddings(self, value): self.tokens_embed.weight = value self.tokens_embed.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutput]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if position_ids is None: position_ids = tf.expand_dims(tf.range(input_shape[-1]), axis=0) if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. one_cst = tf.constant(1.0) attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype) attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0)) else: attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.tokens_embed(input_ids, mode="embedding") position_embeds = tf.gather(self.positions_embed, position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) check_embeddings_within_bounds(token_type_ids, self.config.vocab_size, "token_type_ids") token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding") else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block( hidden_states, attention_mask, head_mask[i], output_attentions, training=training, ) hidden_states = outputs[0] if output_attentions: all_attentions = all_attentions + (outputs[1],) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, ) class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = OpenAIGPTConfig base_model_prefix = "transformer" @dataclass class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. 
Args: logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None mc_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None OPENAI_GPT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ OPENAI_GPT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.", OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutput]: outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings( """ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") # OpenAIGPT does not have past caching features self.supports_xla_generation = False def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) @unpack_inputs @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFCausalLMOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. 
""" transformer_outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.transformer.tokens_embed(hidden_states, mode="linear") loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def prepare_inputs_for_generation(self, inputs, **kwargs): return {"input_ids": inputs} def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings( """ OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) config.num_labels = 1 self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") self.multiple_choice_head = TFSequenceSummary( config, initializer_range=config.initializer_range, name="multiple_choice_head" ) @unpack_inputs @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, mc_token_ids: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFOpenAIGPTDoubleHeadsModelOutput]: r""" mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFOpenAIGPTDoubleHeadsModel >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt") >>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt") >>> # Add a [CLS] to the vocabulary (we should train it also!) 
>>> tokenizer.add_special_tokens({"cls_token": "[CLS]"}) >>> model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size >>> print(tokenizer.cls_token_id, len(tokenizer)) # The newly token the last token of the vocabulary >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> encoding = tokenizer(choices, return_tensors="tf") >>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()} >>> inputs["mc_token_ids"] = tf.constant( ... [inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1] ... )[ ... None, : ... ] # Batch size 1 >>> outputs = model(inputs) >>> lm_prediction_scores, mc_prediction_scores = outputs[:2] ```""" if input_ids is not None: input_shapes = shape_list(input_ids) else: input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None transformer_outputs = self.transformer( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) if return_dict and output_hidden_states: # We do this to match the slightly odd PT behaviour - the final hidden state is reshaped to rank 4 when the # input is rank 3, but all other hidden states remain at rank-3 (with the first 2 dims merged) all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,) else: all_hidden_states = None lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear") mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) if not return_dict: return (lm_logits, mc_logits) + transformer_outputs[1:] return TFOpenAIGPTDoubleHeadsModelOutput( logits=lm_logits, mc_logits=mc_logits, hidden_states=all_hidden_states, attentions=transformer_outputs.attentions, ) @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "multiple_choice_head", None) is not None: with tf.name_scope(self.multiple_choice_head.name): self.multiple_choice_head.build(None) @add_start_docstrings( """ The OpenAI GPT Model transformer with a sequence classification head on top (linear layer). [`TFOpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. 
If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="score", use_bias=False, ) self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSequenceClassifierOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = ( tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) - 1 ) sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) loss = None if labels is not None: if input_ids is not None: batch_size, sequence_length = shape_list(input_ids)[:2] else: batch_size, sequence_length = shape_list(inputs_embeds)[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." 
if not tf.is_tensor(sequence_lengths): in_logits = logits[0:batch_size, sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "score", None) is not None: with tf.name_scope(self.score.name): self.score.build([None, None, self.config.n_embd]) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None)
transformers/src/transformers/models/openai/modeling_tf_openai.py/0
{ "file_path": "transformers/src/transformers/models/openai/modeling_tf_openai.py", "repo_id": "transformers", "token_count": 17651 }
337
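`TFAttention.causal_attention_mask` in the file above builds its triangular mask with plain comparisons rather than `tf.linalg.band_part` so it stays TPU-friendly. A small standalone sketch of the same expression (assuming TensorFlow is installed), showing that each destination position may attend to itself and to every earlier source position:

```python
import tensorflow as tf

def causal_attention_mask(nd, ns):
    # 1's in the lower triangle, counting from the lower-right corner.
    i = tf.range(nd)[:, None]
    j = tf.range(ns)
    return i >= j - ns + nd

print(causal_attention_mask(3, 5).numpy().astype(int))
# [[1 1 1 0 0]
#  [1 1 1 1 0]
#  [1 1 1 1 1]]
```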
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OWL-ViT model configuration""" import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", } class OwlViTTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OwlViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OwlViTTextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. Example: ```python >>> from transformers import OwlViTTextConfig, OwlViTTextModel >>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTTextConfig() >>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration >>> model = OwlViTTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "owlvit_text_model" def __init__( self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type") == "owlvit": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class OwlViTVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. 
intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel >>> # Initializing a OwlViTVisionModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTVisionConfig() >>> # Initializing a OwlViTVisionModel model from the google/owlvit-base-patch32 style configuration >>> model = OwlViTVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "owlvit_vision_model" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type") == "owlvit": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(config_dict, **kwargs) class OwlViTConfig(PretrainedConfig): r""" [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. """ model_type = "owlvit" def __init__( self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.") if vision_config is None: vision_config = {} logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values.") self.text_config = OwlViTTextConfig(**text_config) self.vision_config = OwlViTVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.return_dict = return_dict self.initializer_factor = 1.0 @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) @classmethod def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs): r""" Instantiate an [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision model configuration.
Returns: [`OwlViTConfig`]: An instance of a configuration object """ config_dict = {} config_dict["text_config"] = text_config config_dict["vision_config"] = vision_config return cls.from_dict(config_dict, **kwargs) class OwlViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 def generate_dummy_inputs( self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs( processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework ) image_input_dict = super().generate_dummy_inputs( processor.image_processor, batch_size=batch_size, framework=framework ) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14
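# A short usage sketch for the composite config defined above. It relies only on the
# `from_text_vision_configs` classmethod and the defaults visible in this file; the
# hyperparameter values are illustrative, not recommendations.
#
#   >>> from transformers import OwlViTConfig
#   >>> config = OwlViTConfig.from_text_vision_configs(
#   ...     text_config={"hidden_size": 512}, vision_config={"patch_size": 32}
#   ... )
#   >>> config.text_config.hidden_size, config.vision_config.patch_size, config.projection_dim
#   (512, 32, 512)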
transformers/src/transformers/models/owlvit/configuration_owlvit.py/0
{ "file_path": "transformers/src/transformers/models/owlvit/configuration_owlvit.py", "repo_id": "transformers", "token_count": 6648 }
338
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import warnings import flatdict import torch from transformers import LlamaTokenizer, PersimmonConfig, PersimmonForCausalLM try: from transformers import LlamaTokenizerFast tokenizer_class = LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) tokenizer_class = LlamaTokenizer """ Sample usage: ``` git clone https://github.com/persimmon-ai-labs/adept-inference wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_base_model_release.tar wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar python src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py --input_dir /path/to/downloaded/persimmon/weights/ --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import PersimmonForCausalLM, PersimmonTokenizer model = PersimmonForCausalLM.from_pretrained("/output/path") tokenizer = PersimmonTokenizer.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). 
""" KEYS_TO_MODIFY_MAPPING = { "self_attention": "self_attn", "language_model.encoder": "model", "word_embeddings_for_head": "lm_head", "language_model.embedding.word_embeddings": "model.embed_tokens", } KEYS_TO_REMOVE = "rotary_emb.inv_freq" def rename_state_dict(state_dict): model_state_dict = {} for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if KEYS_TO_REMOVE in key: continue model_state_dict[key] = value return model_state_dict def convert_persimmon_checkpoint(pytorch_dump_folder_path, ada_lib_path, pt_model_path, safe_serialization=False): import sys sys.path.insert(0, ada_lib_path) model_state_dict_base = torch.load(pt_model_path, map_location="cpu") state_dict = flatdict.FlatDict(model_state_dict_base["model"], ".") state_dict = rename_state_dict(state_dict) transformers_config = PersimmonConfig() model = PersimmonForCausalLM(transformers_config, eos_token_id=71013, bos_token_id=71013).to(torch.bfloat16) model.load_state_dict(state_dict) model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) transformers_config.save_pretrained(pytorch_dump_folder_path) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of Persimmon weights, which contains tokenizer.model and model folders", ) parser.add_argument( "--pt_model_path", help="Location of Persimmon `model_optim_rng.pt`", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument( "--ada_lib_path", help="Location to write HF model and tokenizer", ) parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") args = parser.parse_args() spm_path = os.path.join(args.input_dir, "adept_vocab.model") convert_persimmon_checkpoint( pytorch_dump_folder_path=args.output_dir, pt_model_path=args.pt_model_path, safe_serialization=args.safe_serialization, ada_lib_path=args.ada_lib_path, ) tokenizer = tokenizer_class(spm_path, bos_token="|ENDOFTEXT|", eos_token="|ENDOFTEXT|") tokenizer.save_pretrained(args.output_dir) if __name__ == "__main__": main()
transformers/src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py", "repo_id": "transformers", "token_count": 1749 }
339
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from torch import nn from transformers import PLBartConfig, PLBartForConditionalGeneration, PLBartForSequenceClassification def remove_ignore_keys_(state_dict): ignore_keys = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(k, None) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def convert_fairseq_plbart_checkpoint_from_disk( checkpoint_path, hf_config_path="uclanlp/plbart-base", finetuned=False, classification=False ): state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] remove_ignore_keys_(state_dict) vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0] plbart_config = PLBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size) state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] if not classification: model = PLBartForConditionalGeneration(plbart_config) model.model.load_state_dict(state_dict) if finetuned: model.lm_head = make_linear_from_emb(model.model.shared) else: classification_head = {} for key, value in state_dict.copy().items(): if key.startswith("classification_heads.sentence_classification_head"): classification_head[key.replace("classification_heads.sentence_classification_head.", "")] = value state_dict.pop(key) model = PLBartForSequenceClassification(plbart_config) model.model.load_state_dict(state_dict) model.classification_head.load_state_dict(classification_head) return model if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="uclanlp/plbart-base", type=str, help="Which huggingface architecture to use: plbart-base", ) parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") parser.add_argument( "--classification", action="store_true", help="whether the model is a classification checkpoint" ) args = parser.parse_args() model = convert_fairseq_plbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, classification=args.classification, ) model.save_pretrained(args.pytorch_dump_folder_path)
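# Sample usage (the paths below are placeholders, not part of the original script):
#
#   python convert_plbart_original_checkpoint_to_torch.py \
#       /path/to/fairseq/model.pt /path/to/output_dir --hf_config uclanlp/plbart-base
#
# Add `--finetuned` to rebuild the LM head from the shared embeddings, or
# `--classification` to convert a sequence-classification checkpoint instead.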
transformers/src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py/0
{ "file_path": "transformers/src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py", "repo_id": "transformers", "token_count": 1325 }
340
# coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RemBERT model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/config.json", # See all RemBERT models at https://huggingface.co/models?filter=rembert } class RemBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RemBertModel`]. It is used to instantiate a RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RemBERT [google/rembert](https://huggingface.co/google/rembert) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 250300): Vocabulary size of the RemBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`]. hidden_size (`int`, *optional*, defaults to 1152): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 18): Number of attention heads for each attention layer in the Transformer encoder. input_embedding_size (`int`, *optional*, defaults to 256): Dimensionality of the input embeddings. output_embedding_size (`int`, *optional*, defaults to 1664): Dimensionality of the output embeddings. intermediate_size (`int`, *optional*, defaults to 4608): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0): The dropout ratio for the attention probabilities. classifier_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the classifier layer when fine-tuning. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. Example: ```python >>> from transformers import RemBertModel, RemBertConfig >>> # Initializing a RemBERT rembert style configuration >>> configuration = RemBertConfig() >>> # Initializing a model from the rembert style configuration >>> model = RemBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "rembert" def __init__( self, vocab_size=250300, hidden_size=1152, num_hidden_layers=32, num_attention_heads=18, input_embedding_size=256, output_embedding_size=1664, intermediate_size=4608, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=312, eos_token_id=313, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.input_embedding_size = input_embedding_size self.output_embedding_size = output_embedding_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.tie_word_embeddings = False class RemBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] ) @property def atol_for_validation(self) -> float: return 1e-4
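# A short sketch of how the dynamic axes above resolve for ONNX export, assuming the
# standard `OnnxConfig(config, task=...)` constructor from `transformers.onnx`:
#
#   >>> onnx_config = RemBertOnnxConfig(RemBertConfig())
#   >>> onnx_config.inputs
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'}),
#                ('token_type_ids', {0: 'batch', 1: 'sequence'})])
#
# For the "multiple-choice" task each mapping instead becomes {0: 'batch', 1: 'choice', 2: 'sequence'}.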
transformers/src/transformers/models/rembert/configuration_rembert.py/0
{ "file_path": "transformers/src/transformers/models/rembert/configuration_rembert.py", "repo_id": "transformers", "token_count": 2855 }
341
# coding=utf-8 # Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow SAM model. This file was mostly generated by auto-translation from the PyTorch original. In the event of a discrepancy, the original file should be regarded as the 'reference' version. """ from __future__ import annotations import collections from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import ACT2FN from ...modeling_tf_outputs import TFBaseModelOutput from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs from ...tf_utils import flatten, functional_layernorm from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "SamConfig" _CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge" TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/sam-vit-huge", "facebook/sam-vit-large", "facebook/sam-vit-base", # See all SAM models at https://huggingface.co/models?filter=sam ] @dataclass class TFSamVisionEncoderOutput(ModelOutput): """ Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection layer to the pooler_output. Args: image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ image_embeds: tf.Tensor | None = None last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFSamImageSegmentationOutput(ModelOutput): """ Base class for Segment-Anything model's output Args: iou_scores (`tf.Tensor` of shape `(batch_size, num_masks)`): The iou scores of the predicted masks. 
pred_masks (`tf.Tensor` of shape `(batch_size, num_masks, height, width)`): The predicted low resolutions masks. Needs to be post-processed by the processor vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. mask_decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ iou_scores: tf.Tensor = None pred_masks: tf.Tensor = None vision_hidden_states: Tuple[tf.Tensor, ...] | None = None vision_attentions: Tuple[tf.Tensor, ...] | None = None mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None class TFSamPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = keras.layers.Conv2D( hidden_size, kernel_size=patch_size, strides=patch_size, name="projection" ) def call(self, pixel_values): batch_size, num_channels, height, width = shape_list(pixel_values) if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." 
) embeddings = self.projection(tf.transpose(pixel_values, perm=[0, 2, 3, 1])) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFSamMLPBlock(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.lin1 = keras.layers.Dense(config.mlp_dim, name="lin1") self.lin2 = keras.layers.Dense(config.hidden_size, name="lin2") self.act = ACT2FN[config.hidden_act] self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.lin1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.lin2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lin1", None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.config.hidden_size]) if getattr(self, "lin2", None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.config.mlp_dim]) class TFSamLayerNorm(keras.layers.Layer): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last", **kwargs): super().__init__(**kwargs) self.eps = eps self.data_format = data_format self.normalized_shape = normalized_shape if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") def build(self, input_shape): self.weight = self.add_weight(shape=self.normalized_shape, initializer="ones", name="weight") self.bias = self.add_weight(shape=self.normalized_shape, initializer="zeros", name="bias") super().build(input_shape) def call(self, x: tf.Tensor) -> tf.Tensor: if self.data_format == "channels_last": x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=-1) elif self.data_format == "channels_first": x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=1) return x class TFSamAttention(keras.layers.Layer): """ SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. 
""" def __init__(self, config, downsample_rate=None, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads if self.internal_dim % config.num_attention_heads != 0: raise ValueError("num_attention_heads must divide hidden_size.") self.q_proj = keras.layers.Dense(self.internal_dim, name="q_proj") self.k_proj = keras.layers.Dense(self.internal_dim, name="k_proj") self.v_proj = keras.layers.Dense(self.internal_dim, name="v_proj") self.out_proj = keras.layers.Dense(self.hidden_size, name="out_proj") def _separate_heads(self, hidden_states: tf.Tensor, num_attention_heads: int) -> tf.Tensor: batch, point_batch_size, n_tokens, channel = shape_list(hidden_states) c_per_head = channel // num_attention_heads hidden_states = tf.reshape( hidden_states, (batch * point_batch_size, n_tokens, num_attention_heads, c_per_head) ) return tf.transpose(hidden_states, perm=[0, 2, 1, 3]) def _recombine_heads(self, hidden_states: tf.Tensor, point_batch_size: int) -> tf.Tensor: batch, n_heads, n_tokens, c_per_head = shape_list(hidden_states) hidden_states = tf.transpose(hidden_states, perm=[0, 2, 1, 3]) return tf.reshape( hidden_states, (batch // tf.reduce_max([1, point_batch_size]), point_batch_size, n_tokens, n_heads * c_per_head), ) def call(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor) -> tf.Tensor: # Input projections query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) point_batch_size = shape_list(query)[1] # Separate into heads query = self._separate_heads(query, self.num_attention_heads) key = self._separate_heads(key, self.num_attention_heads) value = self._separate_heads(value, self.num_attention_heads) # SamAttention _, _, _, c_per_head = shape_list(query) attn = tf.matmul( query, tf.transpose(key, perm=[0, 1, 3, 2]) ) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens attn = attn / tf.math.sqrt(float(c_per_head)) attn = tf.nn.softmax(attn, axis=-1) # Get output out = tf.matmul(attn, value) out = self._recombine_heads(out, point_batch_size) out = self.out_proj(out) return out def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.hidden_size]) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.hidden_size]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.internal_dim]) class TFSamTwoWayAttentionBlock(keras.layers.Layer): def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, **kwargs): """ A transformer block with four layers: (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on sparse inputs (4) cross attention of dense inputs -> sparse inputs Arguments: config (`SamMaskDecoderConfig`): The configuration file used to instantiate the block attention_downsample_rate (*optionalk*, int, defaults to 2): The downsample ratio of the block used to reduce the inner dim of the attention. 
skip_first_layer_pe (*optional*, bool, defaults to `False`): Whether or not to skip the addition of the query_point_embedding on the first layer. """ super().__init__(**kwargs) self.hidden_size = config.hidden_size self.layer_norm_eps = config.layer_norm_eps self.self_attn = TFSamAttention(config, downsample_rate=1, name="self_attn") self.layer_norm1 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm1") self.cross_attn_token_to_image = TFSamAttention( config, downsample_rate=attention_downsample_rate, name="cross_attn_token_to_image" ) self.layer_norm2 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm2") self.mlp = TFSamMLPBlock(config, name="mlp") self.layer_norm3 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm3") self.layer_norm4 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm4") self.cross_attn_image_to_token = TFSamAttention( config, downsample_rate=attention_downsample_rate, name="cross_attn_image_to_token" ) self.skip_first_layer_pe = skip_first_layer_pe def call( self, queries: tf.Tensor, keys: tf.Tensor, query_point_embedding: tf.Tensor, key_point_embedding: tf.Tensor, output_attentions: bool = False, ): # Self attention block if self.skip_first_layer_pe: queries = self.self_attn(query=queries, key=queries, value=queries) else: query = queries + query_point_embedding attn_out = self.self_attn(query=query, key=query, value=queries) queries = queries + attn_out queries = self.layer_norm1(queries) # Cross attention block, tokens attending to image embedding query = queries + query_point_embedding key = keys + key_point_embedding attn_out = self.cross_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm2(queries) # MLP block mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) # Cross attention block, image embedding attending to tokens query = queries + query_point_embedding key = keys + key_point_embedding attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries) keys = keys + attn_out keys = self.layer_norm4(keys) outputs = (queries, keys) if output_attentions: outputs = outputs + (attn_out,) else: outputs = outputs + (None,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, None, self.hidden_size]) if getattr(self, "cross_attn_token_to_image", None) is not None: with tf.name_scope(self.cross_attn_token_to_image.name): self.cross_attn_token_to_image.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, None, self.hidden_size]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "layer_norm3", None) is not None: with tf.name_scope(self.layer_norm3.name): self.layer_norm3.build([None, None, None, self.hidden_size]) if getattr(self, "layer_norm4", None) is not None: with tf.name_scope(self.layer_norm4.name): self.layer_norm4.build([None, None, None, self.hidden_size]) if getattr(self, "cross_attn_image_to_token", None) is not None: with tf.name_scope(self.cross_attn_image_to_token.name): 
self.cross_attn_image_to_token.build(None) class TFSamTwoWayTransformer(keras.layers.Layer): def __init__(self, config: SamMaskDecoderConfig, **kwargs): super().__init__(**kwargs) self.config = config self.num_hidden_layers = config.num_hidden_layers self.layers = [] for i in range(self.num_hidden_layers): self.layers.append(TFSamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0), name=f"layers_._{i}")) self.final_attn_token_to_image = TFSamAttention(config, name="final_attn_token_to_image") self.layer_norm_final_attn = keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="layer_norm_final_attn" ) def call( self, point_embeddings: tf.Tensor, image_embeddings: tf.Tensor, image_positional_embeddings: tf.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_attentions = () if image_embeddings is None: raise ValueError("You have to specify an image_embedding") image_embeddings = tf.transpose(flatten(image_embeddings, 2), perm=(0, 2, 1))[:, None] image_positional_embeddings = tf.transpose(flatten(image_positional_embeddings, 2), (0, 2, 1))[:, None] # Prepare queries queries = point_embeddings keys = image_embeddings # Apply transformer blocks and final layernorm for layer in self.layers: queries, keys, attention_outputs = layer( queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, output_attentions=output_attentions, ) if output_attentions: all_attentions = all_attentions + (attention_outputs,) # Apply the final attenion layer from the points to the image query = queries + point_embeddings key = keys + image_positional_embeddings attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm_final_attn(queries) return queries, keys, all_attentions def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "final_attn_token_to_image", None) is not None: with tf.name_scope(self.final_attn_token_to_image.name): self.final_attn_token_to_image.build(None) if getattr(self, "layer_norm_final_attn", None) is not None: with tf.name_scope(self.layer_norm_final_attn.name): self.layer_norm_final_attn.build([None, None, None, self.config.hidden_size]) for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFSamFeedForward(keras.layers.Layer): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False, **kwargs ): super().__init__(**kwargs) self.num_layers = num_layers self.activation = keras.layers.ReLU() self.proj_in = keras.layers.Dense(hidden_dim, input_shape=(input_dim,), name="proj_in") self.proj_out = keras.layers.Dense(output_dim, input_shape=(hidden_dim,), name="proj_out") self.layers = [ keras.layers.Dense(hidden_dim, input_shape=(hidden_dim,), name=f"layers_._{i}") for i in range(num_layers - 2) ] self.sigmoid_output = sigmoid_output self.hidden_dim = hidden_dim self.input_dim = input_dim def call(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = 
self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = tf.sigmoid(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "proj_in", None) is not None: with tf.name_scope(self.proj_in.name): self.proj_in.build([None, None, self.input_dim]) if getattr(self, "proj_out", None) is not None: with tf.name_scope(self.proj_out.name): self.proj_out.build([None, None, self.hidden_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build([None, None, self.hidden_dim]) class TFSamMaskDecoder(keras.layers.Layer): def __init__(self, config: SamMaskDecoderConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.num_multimask_outputs = config.num_multimask_outputs self.num_mask_tokens = config.num_multimask_outputs + 1 self.transformer = TFSamTwoWayTransformer(config, name="transformer") self.upscale_conv1 = keras.layers.Conv2DTranspose( self.hidden_size // 4, kernel_size=2, strides=2, name="upscale_conv1", data_format="channels_first" ) self.upscale_conv2 = keras.layers.Conv2DTranspose( self.hidden_size // 8, kernel_size=2, strides=2, name="upscale_conv2", data_format="channels_first" ) self.upscale_layer_norm = TFSamLayerNorm( self.hidden_size // 4, data_format="channels_first", name="upscale_layer_norm" ) self.activation = tf.nn.gelu mlps_list = [] for i in range(self.num_mask_tokens): mlps_list += [ TFSamFeedForward( self.hidden_size, self.hidden_size, self.hidden_size // 8, 3, name=f"output_hypernetworks_mlps_._{i}", ) ] self.output_hypernetworks_mlps = mlps_list self.iou_prediction_head = TFSamFeedForward( self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth, name="iou_prediction_head", ) def build(self, input_shape=None): if self.built: return self.built = True self.iou_token = self.add_weight(shape=(1, self.hidden_size), name="iou_token.weight", trainable=True) self.mask_tokens = self.add_weight( shape=(self.num_mask_tokens, self.hidden_size), name="mask_tokens.weight", trainable=True ) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "upscale_conv1", None) is not None: with tf.name_scope(self.upscale_conv1.name): self.upscale_conv1.build([None, self.hidden_size, None, None]) if getattr(self, "upscale_conv2", None) is not None: with tf.name_scope(self.upscale_conv2.name): self.upscale_conv2.build([None, self.hidden_size // 4, None, None]) if getattr(self, "upscale_layer_norm", None) is not None: with tf.name_scope(self.upscale_layer_norm.name): self.upscale_layer_norm.build(None) if getattr(self, "iou_prediction_head", None) is not None: with tf.name_scope(self.iou_prediction_head.name): self.iou_prediction_head.build(None) for mlp in self.output_hypernetworks_mlps: with tf.name_scope(mlp.name): mlp.build(None) def call( self, image_embeddings: tf.Tensor, image_positional_embeddings: tf.Tensor, sparse_prompt_embeddings: tf.Tensor, dense_prompt_embeddings: tf.Tensor, multimask_output: bool, output_attentions: Optional[bool] = None, ) -> Tuple[tf.Tensor, tf.Tensor]: batch_size, num_channels, height, width = shape_list(image_embeddings) point_batch_size = tf.math.maximum(1, tf.shape(sparse_prompt_embeddings)[1]) output_tokens = tf.concat([self.iou_token, 
self.mask_tokens], axis=0) # Should be (1, 32) + (4, 32) = (5, 32) output_tokens = tf.tile( output_tokens[None, None, :], [batch_size, point_batch_size, 1, 1] ) # Should be (batch_size, point_size, 5, 32) # Matt: The original Torch code checked that the sum of sparse_prompt_embeddings equalled 0. However, this only # happens when the sparse prompt embeddings are an empty tensor with shape[1] == 0. I replaced # it with an explicit shape check to avoid data-dependent control flow which breaks XLA. if shape_list(sparse_prompt_embeddings)[1] != 0: tokens = tf.concat((output_tokens, sparse_prompt_embeddings), axis=2) else: tokens = output_tokens point_embeddings = tf.cast(tokens, self.iou_token.dtype) image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = tf.repeat(image_embeddings, point_batch_size, axis=0) image_positional_embeddings = tf.repeat(image_positional_embeddings, point_batch_size, axis=0) point_embedding, image_embeddings, attentions = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, output_attentions=output_attentions, ) iou_token_out = point_embedding[:, :, 0, :] mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] image_embeddings = tf.transpose(image_embeddings, perm=(0, 1, 3, 2)) image_embeddings = tf.reshape(image_embeddings, [batch_size * point_batch_size, num_channels, height, width]) upscaled_embedding = self.upscale_conv1(image_embeddings) upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) hyper_in_list = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = tf.stack(hyper_in_list, axis=2) _, num_channels, height, width = shape_list(upscaled_embedding) upscaled_embedding = tf.reshape( upscaled_embedding, [batch_size, point_batch_size, num_channels, height * width] ) masks = tf.reshape(hyper_in @ upscaled_embedding, [batch_size, point_batch_size, -1, height, width]) iou_pred = self.iou_prediction_head(iou_token_out) if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] outputs = (masks, iou_pred) if output_attentions: outputs = outputs + (attentions,) else: outputs = outputs + (None,) return outputs class TFSamPositionalEmbedding(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.scale = config.hidden_size // 2 self.config = config def build(self, input_shape): # TODO Matt: What is going on here? Why is a non-trainable weight randomly initialized? self.positional_embedding = self.add_weight( name="positional_embedding", shape=(2, self.config.num_pos_feats), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=self.scale), trainable=False, ) super().build(input_shape) def call(self, input_coords, input_shape=None): """Positionally encode points that are normalized to [0,1].""" coordinates = tf.identity(input_coords) if input_shape is not None: coordinates = tf.stack( [ tf.cast(coordinates[:, :, :, 0], tf.float32) / input_shape[1], tf.cast(coordinates[:, :, :, 1], tf.float32) / input_shape[0], ], axis=-1, ) # assuming coords are in [0, 1]^2 square and have d_1 x ... 
x d_n x 2 shape coordinates = 2 * coordinates - 1 coordinates = tf.cast(coordinates, self.positional_embedding.dtype) coordinates = tf.matmul(coordinates, self.positional_embedding) coordinates = 2 * np.pi * coordinates # outputs d_1 x ... x d_n x channel shape return tf.concat([tf.sin(coordinates), tf.cos(coordinates)], axis=-1) class TFSamMaskEmbedding(keras.layers.Layer): def __init__(self, config: SamPromptEncoderConfig, **kwargs): super().__init__(**kwargs) self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = keras.layers.Conv2D(self.mask_input_channels, kernel_size=2, strides=2, name="conv1") self.conv2 = keras.layers.Conv2D(config.mask_input_channels, kernel_size=2, strides=2, name="conv2") self.conv3 = keras.layers.Conv2D(config.hidden_size, kernel_size=1, name="conv3") self.layer_norm1 = TFSamLayerNorm(self.mask_input_channels, config.layer_norm_eps, name="layer_norm1") self.layer_norm2 = TFSamLayerNorm(self.mask_input_channels * 4, config.layer_norm_eps, name="layer_norm2") self.config = config def call(self, masks): masks = tf.transpose(masks, perm=(0, 2, 3, 1)) # Convert to channels-last hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) dense_embeddings = tf.transpose(dense_embeddings, perm=(0, 3, 1, 2)) # Convert back to channels-first return dense_embeddings def build(self, input_shape=None): # This class needs an explicit build method because it isn't called with the standard dummy inputs if self.built: return self.built = True with tf.name_scope("conv1"): self.conv1.build([None, None, None, 1]) with tf.name_scope("conv2"): self.conv2.build([None, None, None, self.mask_input_channels]) with tf.name_scope("conv3"): self.conv3.build([None, None, None, self.mask_input_channels * 4]) with tf.name_scope("layer_norm1"): self.layer_norm1.build([None, None, None, self.mask_input_channels]) with tf.name_scope("layer_norm2"): self.layer_norm2.build([None, None, None, self.mask_input_channels * 4]) class TFSamPromptEncoder(keras.layers.Layer): def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding, **kwargs): super().__init__(**kwargs) self.shared_embedding = shared_patch_embedding self.mask_embed = TFSamMaskEmbedding(config, name="mask_embed") self.no_mask_embed = None self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size) self.input_image_size = config.image_size self.point_embed = [] self.hidden_size = config.hidden_size self.not_a_point_embed = None self.config = config def build(self, input_shape=None): self.no_mask_embed = self.add_weight( name="no_mask_embed.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) self.point_embed = [ self.add_weight( name=f"point_embed_._{i}.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) for i in range(self.config.num_point_embeddings) ] self.not_a_point_embed = self.add_weight( name="not_a_point_embed.weight", shape=(1, self.hidden_size), initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), trainable=True, ) with tf.name_scope("mask_embed"): # We must explicitly build the mask embed because it isn't touched by the 
standard dummy inputs self.mask_embed.build( (None, self.config.mask_input_channels, self.config.image_size, self.config.image_size) ) if self.built: return self.built = True if getattr(self, "mask_embed", None) is not None: with tf.name_scope(self.mask_embed.name): self.mask_embed.build(None) def _embed_points(self, points: tf.Tensor, labels: tf.Tensor, pad: bool) -> tf.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: target_point_shape = (shape_list(points)[0], shape_list(points)[1], 1, shape_list(points)[-1]) target_labels_shape = (shape_list(points)[0], shape_list(points)[1], 1) padding_point = tf.zeros(target_point_shape, dtype=points.dtype) padding_label = -tf.ones(target_labels_shape, dtype=labels.dtype) points = tf.concat([points, padding_point], axis=2) labels = tf.concat([labels, padding_label], axis=2) input_shape = (self.input_image_size, self.input_image_size) point_embedding = self.shared_embedding(points, input_shape) point_embedding = tf.where(labels[..., None] == -1, self.not_a_point_embed[0], point_embedding) point_embedding = tf.where( labels[..., None] != -10, point_embedding, tf.zeros_like(point_embedding), ) point_embedding = tf.where( (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0], point_embedding ) point_embedding = tf.where( (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1], point_embedding ) return point_embedding def _embed_boxes(self, boxes: tf.Tensor) -> tf.Tensor: """Embeds box prompts.""" boxes = boxes + 0.5 # Shift to center of pixel batch_size, nb_boxes = shape_list(boxes)[:2] coords = tf.reshape(boxes, (batch_size, nb_boxes, 2, 2)) input_shape = (self.input_image_size, self.input_image_size) corner_embedding = self.shared_embedding(coords, input_shape) corner_embedding += tf.where( tf.range(shape_list(corner_embedding)[2])[None, None, :, None] == 0, self.point_embed[2][0], self.point_embed[3][0], ) return corner_embedding def call( self, batch_size: Optional[int], input_points: Optional[Tuple[tf.Tensor, tf.Tensor]], input_labels: tf.Tensor | None, input_boxes: tf.Tensor | None, input_masks: tf.Tensor | None, ) -> Tuple[tf.Tensor, tf.Tensor]: """ Embeds different types of prompts, returning both sparse and dense embeddings. Args: points (`tf.Tensor`, *optional*): point coordinates and labels to embed. 
boxes (`tf.Tensor`, *optional*): boxes to embed masks (`tf.Tensor`, *optional*): masks to embed """ sparse_embeddings = None if input_points is not None: batch_size, point_batch_size = shape_list(input_points)[:2] if input_labels is None: raise ValueError("If points are provided, labels must also be provided.") point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) sparse_embeddings = tf.zeros( (batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype ) sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2) if input_boxes is not None: batch_size = shape_list(input_boxes)[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed[0] dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1)) dense_embeddings = tf.tile( dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1]) ) if sparse_embeddings is None: sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype) return sparse_embeddings, dense_embeddings class TFSamVisionAttention(keras.layers.Layer): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, window_size, **kwargs): super().__init__(**kwargs) input_size = ( (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size) ) self.input_size = input_size self.num_attention_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.head_dim = head_dim self.scale = head_dim**-0.5 self.dropout = config.attention_dropout self.qkv = keras.layers.Dense(config.hidden_size * 3, use_bias=config.qkv_bias, name="qkv") self.proj = keras.layers.Dense(config.hidden_size, name="proj") self.use_rel_pos = config.use_rel_pos if self.use_rel_pos: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") self.config = config def build(self, input_shape=None): if self.input_size is not None: # initialize relative positional embeddings self.rel_pos_h = self.add_weight( shape=(2 * self.input_size[0] - 1, self.head_dim), initializer="zeros", name="rel_pos_h" ) self.rel_pos_w = self.add_weight( shape=(2 * self.input_size[1] - 1, self.head_dim), initializer="zeros", name="rel_pos_w" ) if self.built: return self.built = True if getattr(self, "qkv", None) is not None: with tf.name_scope(self.qkv.name): self.qkv.build([None, None, self.config.hidden_size]) if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, self.config.hidden_size]) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`tf.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. 
rel_pos_resized = tf.image.resize( tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)), size=(max_rel_dist, rel_pos.shape[1]), method="bilinear", ) rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist)) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0) k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32)) def add_decomposed_rel_pos( self, attn: tf.Tensor, query: tf.Tensor, rel_pos_h: tf.Tensor, rel_pos_w: tf.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> tf.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: attn (`tf.Tensor`): attention map. query (`tf.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`tf.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`tf.Tensor`): relative position embeddings (Lw, channel) for width axis. q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: attn (`tf.Tensor`): attention map with added relative positional embeddings. """ query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = shape_list(query) reshaped_query = tf.reshape(query, (batch_size, query_height, query_width, dim)) rel_h = tf.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = tf.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) attn = tf.reshape(attn, (batch_size, query_height, query_width, key_height, key_width)) attn = attn + tf.expand_dims(rel_h, axis=-1) + tf.expand_dims(rel_w, axis=-2) attn = tf.reshape(attn, (batch_size, query_height * query_width, key_height * key_width)) return attn def call(self, hidden_states: tf.Tensor, output_attentions=False, training=False) -> tf.Tensor: batch_size, height, width, _ = shape_list(hidden_states) # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = tf.reshape(self.qkv(hidden_states), (batch_size, height * width, 3, self.num_attention_heads, -1)) qkv = tf.transpose(qkv, perm=(2, 0, 3, 1, 4)) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = tf.unstack( tf.reshape(qkv, (3, batch_size * self.num_attention_heads, height * width, -1)), axis=0 ) attn_weights = tf.matmul(query * self.scale, key, transpose_b=True) if self.use_rel_pos: attn_weights = self.add_decomposed_rel_pos( attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if training: attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout) else: attn_probs = attn_weights attn_output = tf.reshape(attn_probs @ value, (batch_size, self.num_attention_heads, height, width, -1)) attn_output = tf.transpose(attn_output, perm=(0, 2, 3, 1, 4)) attn_output = tf.reshape(attn_output, (batch_size, height, width, 
self.config.hidden_size)) attn_output = self.proj(attn_output) if output_attentions: outputs = (attn_output, attn_weights) else: outputs = (attn_output, None) return outputs class TFSamVisionLayer(keras.layers.Layer): def __init__(self, config, window_size, **kwargs): super().__init__(**kwargs) self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.attn = TFSamVisionAttention(config, window_size, name="attn") self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") self.mlp = TFSamMLPBlock(config, name="mlp") self.window_size = window_size self.config = config def window_partition(self, hidden_states: tf.Tensor, window_size: int) -> Tuple[tf.Tensor, Tuple[int, int]]: batch_size, height, width, channel = shape_list(hidden_states) pad_h = (window_size - height % window_size) % window_size pad_w = (window_size - width % window_size) % window_size if pad_h > 0 or pad_w > 0: hidden_states = tf.pad(hidden_states, [[0, 0], [0, pad_h], [0, pad_w], [0, 0]]) pad_height, pad_width = height + pad_h, width + pad_w hidden_states = tf.reshape( hidden_states, [batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel], ) windows = tf.reshape( tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [-1, window_size, window_size, channel] ) return windows, (pad_height, pad_width) def window_unpartition( self, windows: tf.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int] ) -> tf.Tensor: pad_height, pad_width = padding_shape height, width = original_shape batch_size = shape_list(windows)[0] // (pad_height * pad_width // window_size // window_size) hidden_states = tf.reshape( windows, [batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1] ) hidden_states = tf.reshape( tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [batch_size, pad_height, pad_width, -1] ) if pad_height > height or pad_width > width: hidden_states = hidden_states[:, :height, :width, :] return hidden_states def call( self, hidden_states: tf.Tensor, output_attentions: Optional[bool] = False, training: Optional[bool] = False, ) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) if self.window_size > 0: height, width = hidden_states.shape[1], hidden_states.shape[2] hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size) hidden_states, attn_weights = self.attn( hidden_states=hidden_states, output_attentions=output_attentions, training=training, ) if self.window_size > 0: hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width)) hidden_states = residual + hidden_states layernorm_output = self.layer_norm2(hidden_states) hidden_states = hidden_states + self.mlp(layernorm_output) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, None, self.config.hidden_size]) if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, None, self.config.hidden_size]) if getattr(self, "mlp", None) is not None: with 
tf.name_scope(self.mlp.name): self.mlp.build(None) class TFSamVisionNeck(keras.layers.Layer): def __init__(self, config: SamVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.conv1 = keras.layers.Conv2D( config.output_channels, kernel_size=1, use_bias=False, name="conv1", ) self.layer_norm1 = TFSamLayerNorm(config.output_channels, name="layer_norm1") self.conv2 = keras.layers.Conv2D( config.output_channels, kernel_size=3, padding="same", use_bias=False, name="conv2", ) self.layer_norm2 = TFSamLayerNorm(config.output_channels, name="layer_norm2") def call(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2]) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build([None, None, None, self.config.hidden_size]) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build([None, None, None, self.config.output_channels]) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build(None) class TFSamVisionEncoder(keras.layers.Layer): def __init__(self, config: SamVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.image_size = config.image_size self.patch_embed = TFSamPatchEmbeddings(config, name="patch_embed") self.pos_embed = None self.layers = [] for i in range(config.num_hidden_layers): layer = TFSamVisionLayer( config, window_size=config.window_size if i not in config.global_attn_indexes else 0, name=f"layers_._{i}", ) self.layers.append(layer) self.neck = TFSamVisionNeck(config, name="neck") def build(self, input_shape=None): if self.built: return self.built = True if self.config.use_abs_pos: # Initialize absolute positional embedding with pretrain image size. 
self.pos_embed = self.add_weight( shape=[ 1, self.config.image_size // self.config.patch_size, self.config.image_size // self.config.patch_size, self.config.hidden_size, ], initializer="zeros", trainable=True, name="pos_embed", ) if getattr(self, "patch_embed", None) is not None: with tf.name_scope(self.patch_embed.name): self.patch_embed.build(None) if getattr(self, "neck", None) is not None: with tf.name_scope(self.neck.name): self.neck.build(None) for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) def get_input_embeddings(self): return self.patch_embed def call( self, pixel_values: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSamVisionEncoderOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.patch_embed(pixel_values) if self.pos_embed is not None: hidden_states = hidden_states + self.pos_embed all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.neck(hidden_states) if not return_dict: outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_self_attentions,) return outputs return TFSamVisionEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class TFSamPreTrainedModel(TFPreTrainedModel): config_class = SamConfig base_model_prefix = "sam" main_input_name = "pixel_values" SAM_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a TensorFlow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TensorFlow Model and refer to the TensorFlow documentation for all matter related to general usage and behavior. Parameters: config ([`SamConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ SAM_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for details. 
input_points (`tf.Tensor` of shape `(batch_size, num_points, 2)`): Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields much better results. The points can be obtained by passing a list of list of list to the processor that will create corresponding `tf` tensors of dimension 4. The first dimension is the image batch size, the second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per input point), the third dimension is the number of points per segmentation mask (it is possible to pass multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal) coordinates of the point. If a different number of points is passed either for each image, or for each mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the computation of the embedding will be skipped for these points using the labels. input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points)`): Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the official implementation, there are 3 types of labels - `1`: the point is a point that contains the object of interest - `0`: the point is a point that does not contain the object of interest - `-1`: the point corresponds to the background We added the label: - `-10`: the point is a padding point, thus should be ignored by the prompt encoder The padding labels should be automatically done by the processor. input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes, 4)`): Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields much better generated masks. The boxes can be obtained by passing a list of list of list to the processor, that will generate a `tf` tensor, with each dimension corresponding respectively to the image batch size, the number of boxes per image and the coordinates of the top left and bottom right point of the box. In the order (`x1`, `y1`, `x2`, `y2`): - `x1`: the x coordinate of the top left point of the input box - `y1`: the y coordinate of the top left point of the input box - `x2`: the x coordinate of the bottom right point of the input box - `y2`: the y coordinate of the bottom right point of the input box input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`): The SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to generate a corresponding embedding, that will be fed later on to the mask decoder. These masks need to be fed manually by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`). image_embeddings (`tf.Tensor` of shape `(batch_size, output_channels, window_size, window_size)`): Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory-efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings` method, and then feed them to the `call` method instead of feeding the `pixel_values`. multimask_output (`bool`, *optional*): In the original implementation and paper, the model always outputs 3 masks per image (or per point / per bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the "best" mask, by specifying `multimask_output=False`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers.
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ", " optional 2D location and bounding boxes.", SAM_START_DOCSTRING, ) class TFSamModel(TFSamPreTrainedModel): _keys_to_ignore_on_load_missing = [r"prompt_encoder.shared_embedding.positional_embedding"] def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.shared_image_embedding = TFSamPositionalEmbedding(config.vision_config, name="shared_image_embedding") self.vision_encoder = TFSamVisionEncoder(config.vision_config, name="vision_encoder") self.prompt_encoder = TFSamPromptEncoder( config.prompt_encoder_config, self.shared_image_embedding, name="prompt_encoder" ) self.mask_decoder = TFSamMaskDecoder(config.mask_decoder_config, name="mask_decoder") self.config = config def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self): size = self.config.prompt_encoder_config.image_embedding_size grid = tf.ones((size, size)) y_embed = tf.math.cumsum(grid, axis=0) - 0.5 x_embed = tf.math.cumsum(grid, axis=1) - 0.5 y_embed = y_embed / size x_embed = x_embed / size positional_embedding = self.shared_image_embedding(tf.stack([x_embed, y_embed], axis=-1)) return tf.expand_dims(tf.transpose(positional_embedding, perm=[2, 0, 1]), axis=0) # channel x height x width def get_image_embeddings( self, pixel_values, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple. """ vision_output = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeddings = vision_output[0] return image_embeddings def get_prompt_embeddings( self, input_points: tf.Tensor | None = None, input_labels: tf.Tensor | None = None, input_boxes: tf.Tensor | None = None, input_masks: tf.Tensor | None = None, ): r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. Args: input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): Optional input points for the prompt encoder. The padding of the point is automatically done by the processor. `point_batch_size` refers to the number of masks that we want the model to predict per point. The model will output `point_batch_size` times 3 masks in total. input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): Optional input labels for the prompt encoder. The padding of the labels is automatically done by the processor, or can be fed by the user. 
input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`): Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the processor. users can also pass manually the input boxes. input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`): Optional input masks for the prompt encoder. """ prompt_output = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) return prompt_output @unpack_inputs @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING) def call( self, pixel_values: TFModelInputType | None = None, input_points: tf.Tensor | None = None, input_labels: tf.Tensor | None = None, input_boxes: tf.Tensor | None = None, input_masks: tf.Tensor | None = None, image_embeddings: tf.Tensor | None = None, multimask_output: bool = True, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> TFSamImageSegmentationOutput | Tuple[tf.Tensor]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None and image_embeddings is None: raise ValueError("Either pixel_values or image_embeddings must be provided.") if pixel_values is not None and image_embeddings is not None: raise ValueError("Only one of pixel_values and image_embeddings can be provided.") if input_points is not None and len(input_points.shape) != 4: raise ValueError( "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.", " got {}.".format(input_points.shape), ) if input_boxes is not None and len(input_boxes.shape) != 3: raise ValueError( "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.", " got {}.".format(input_boxes.shape), ) if input_points is not None and input_boxes is not None: point_batch_size = shape_list(input_points)[1] box_batch_size = shape_list(input_boxes)[1] if point_batch_size != box_batch_size: raise ValueError( "You should provide as many bounding boxes as input points per box. 
Got {} and {}.".format( point_batch_size, box_batch_size ) ) if pixel_values is not None: # Ensures that later checks pass even with an all-None shape from the serving signature pixel_values = tf.ensure_shape( pixel_values, [ None, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size, ], ) image_positional_embeddings = self.get_image_wide_positional_embeddings() # repeat with batch size batch_size = shape_list(pixel_values)[0] if pixel_values is not None else shape_list(image_embeddings)[0] image_positional_embeddings = tf.repeat(image_positional_embeddings, batch_size, axis=0) vision_attentions = None vision_hidden_states = None if pixel_values is not None: vision_outputs = self.vision_encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, training=training, ) image_embeddings = vision_outputs["last_hidden_state"] if output_hidden_states: vision_hidden_states = vision_outputs["hidden_states"] if output_attentions: vision_attentions = vision_outputs["attentions"] if input_points is not None and input_labels is None: input_labels = tf.ones_like(input_points[:, :, :, 0], dtype=tf.int32) if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]: raise ValueError( "The batch size of the image embeddings and the input points must be the same. ", "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]), " if you want to pass multiple points for the same image, make sure that you passed ", " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ", " input_labels of shape (batch_size, point_batch_size, num_points_per_image)", ) sparse_embeddings, dense_embeddings = self.prompt_encoder( batch_size=shape_list(image_embeddings)[0], input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder( image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, output_attentions=output_attentions, ) if not return_dict: output = (iou_predictions, low_res_masks) if output_hidden_states: output = output + (vision_hidden_states,) if output_attentions: output = output + (vision_attentions, mask_decoder_attentions) return output return TFSamImageSegmentationOutput( iou_scores=iou_predictions, pred_masks=low_res_masks, vision_hidden_states=vision_hidden_states, vision_attentions=vision_attentions, mask_decoder_attentions=mask_decoder_attentions, ) def serving_output(self, output: TFSamImageSegmentationOutput) -> TFSamImageSegmentationOutput: hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None return TFSamImageSegmentationOutput( iou_scores=output.iou_scores, pred_masks=output.pred_masks, vision_hidden_states=hs if self.config.output_hidden_states else None, vision_attentions=attns if self.config.output_attentions else None, mask_decoder_attentions=output.mask_decoder_attentions if self.config.output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "shared_image_embedding", None) is not None: with 
tf.name_scope(self.shared_image_embedding.name): self.shared_image_embedding.build(None) if getattr(self, "vision_encoder", None) is not None: with tf.name_scope(self.vision_encoder.name): self.vision_encoder.build(None) if getattr(self, "prompt_encoder", None) is not None: with tf.name_scope(self.prompt_encoder.name): self.prompt_encoder.build(None) if getattr(self, "mask_decoder", None) is not None: with tf.name_scope(self.mask_decoder.name): self.mask_decoder.build(None)
transformers/src/transformers/models/sam/modeling_tf_sam.py/0
{ "file_path": "transformers/src/transformers/models/sam/modeling_tf_sam.py", "repo_id": "transformers", "token_count": 33371 }
342
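The TF SAM modeling file above is easiest to follow from the caller's side. The sketch below is not part of the source file: the checkpoint name, the COCO image URL and the point coordinates are illustrative assumptions, and it simply shows one plausible way to drive `TFSamModel` with a single point prompt via `SamProcessor`.

```python
import requests
from PIL import Image
from transformers import SamProcessor, TFSamModel

# Assumed public checkpoint; any SAM checkpoint with TF weights should behave the same way.
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = TFSamModel.from_pretrained("facebook/sam-vit-base")

# Arbitrary test image (same COCO image used elsewhere in this repo's conversion scripts).
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# One (x, y) point prompt for one image -> tensors of shape (batch, point_batch, num_points, 2).
inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="tf")
outputs = model(**inputs)

# pred_masks are low-resolution; post-process them back to the original image size.
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"], return_tensors="tf"
)
print(outputs.iou_scores.shape)  # (1, 1, 3): three candidate masks when multimask_output=True
```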
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SegFormer checkpoints.""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def rename_keys(state_dict, encoder_only=False): new_state_dict = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head"): key = "segformer.encoder." + key if key.startswith("backbone"): key = key.replace("backbone", "segformer.encoder") if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 idx = key[key.find("patch_embed") + len("patch_embed")] key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}") if "norm" in key: key = key.replace("norm", "layer_norm") if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")] key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}") if "layer_norm1" in key: key = key.replace("layer_norm1", "layer_norm_1") if "layer_norm2" in key: key = key.replace("layer_norm2", "layer_norm_2") if "block" in key: # replace for example block1 by block.0 idx = key[key.find("block") + len("block")] key = key.replace(f"block{idx}", f"block.{int(idx)-1}") if "attn.q" in key: key = key.replace("attn.q", "attention.self.query") if "attn.proj" in key: key = key.replace("attn.proj", "attention.output.dense") if "attn" in key: key = key.replace("attn", "attention.self") if "fc1" in key: key = key.replace("fc1", "dense1") if "fc2" in key: key = key.replace("fc2", "dense2") if "linear_pred" in key: key = key.replace("linear_pred", "classifier") if "linear_fuse" in key: key = key.replace("linear_fuse.conv", "linear_fuse") key = key.replace("linear_fuse.bn", "batch_norm") if "linear_c" in key: # replace for example linear_c4 by linear_c.3 idx = key[key.find("linear_c") + len("linear_c")] key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}") if key.startswith("head"): key = key.replace("head", "classifier") new_state_dict[key] = value return new_state_dict def read_in_k_v(state_dict, config): # for each of the encoder blocks: for i in range(config.num_encoder_blocks): for j in range(config.depths[i]): # read in weights + bias of keys and values (which is a single matrix in the original implementation) kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight") kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias") # next, add keys and values (in that order) to the state dict state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = 
kv_weight[ : config.hidden_sizes[i], : ] state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]] state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[ config.hidden_sizes[i] :, : ] state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[ config.hidden_sizes[i] : ] # We will verify our results on a COCO image def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @torch.no_grad() def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our SegFormer structure. """ # load default SegFormer configuration config = SegformerConfig() encoder_only = False # set attributes based on model_name repo_id = "huggingface/label-files" if "segformer" in model_name: size = model_name[len("segformer.") : len("segformer.") + 2] if "ade" in model_name: config.num_labels = 150 filename = "ade20k-id2label.json" expected_shape = (1, 150, 128, 128) elif "city" in model_name: config.num_labels = 19 filename = "cityscapes-id2label.json" expected_shape = (1, 19, 128, 128) else: raise ValueError(f"Model {model_name} not supported") elif "mit" in model_name: encoder_only = True size = model_name[4:6] config.num_labels = 1000 filename = "imagenet-1k-id2label.json" expected_shape = (1, 1000) else: raise ValueError(f"Model {model_name} not supported") # set config attributes id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if size == "b0": pass elif size == "b1": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 256 elif size == "b2": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 4, 6, 3] elif size == "b3": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 4, 18, 3] elif size == "b4": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 8, 27, 3] elif size == "b5": config.hidden_sizes = [64, 128, 320, 512] config.decoder_hidden_size = 768 config.depths = [3, 6, 40, 3] else: raise ValueError(f"Size {size} not supported") # load image processor (only resize + normalize) image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) # prepare image image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values logger.info(f"Converting model {model_name}...") # load original state dict if encoder_only: state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu")) else: state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"] # rename keys state_dict = rename_keys(state_dict, encoder_only=encoder_only) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(state_dict, config) # create HuggingFace model and load state dict if encoder_only: config.reshape_last_stage = False model = SegformerForImageClassification(config) else: model = SegformerForSemanticSegmentation(config) model.load_state_dict(state_dict) model.eval() # forward pass outputs = model(pixel_values) logits = 
outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": expected_slice = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": expected_slice = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": expected_slice = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": expected_slice = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": expected_slice = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": expected_slice = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": expected_slice = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, 
-5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": expected_slice = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": expected_slice = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2) # finally, save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="segformer.b0.512x512.ade.160k", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) args = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
transformers/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py", "repo_id": "transformers", "token_count": 8906 }
343
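The conversion script above is meant to be run offline on an original (mmsegmentation-style) SegFormer checkpoint. As a rough illustration only, with hypothetical local paths and assuming the `.pth` file has already been downloaded from the original SegFormer release, it can be invoked directly or through its CLI:

```python
# Hypothetical paths; the original checkpoint is not fetched by the script itself.
from transformers.models.segformer.convert_segformer_original_to_pytorch import (
    convert_segformer_checkpoint,
)

convert_segformer_checkpoint(
    model_name="segformer.b0.512x512.ade.160k",
    checkpoint_path="./segformer.b0.512x512.ade.160k.pth",
    pytorch_dump_folder_path="./segformer-b0-ade-512-512",
)

# Equivalent CLI invocation:
# python convert_segformer_original_to_pytorch.py \
#     --model_name segformer.b0.512x512.ade.160k \
#     --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#     --pytorch_dump_folder_path ./segformer-b0-ade-512-512
```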
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech2Text model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class Speech2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Speech2TextModel`]. It is used to instantiate a Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text [facebook/s2t-small-librispeech-asr](https://huggingface.co/facebook/s2t-small-librispeech-asr) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 10000): Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Speech2TextModel`] encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. encoder_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. decoder_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer decoder. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models). is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is set up as an encoder-decoder architecture for sequence-to-sequence tasks. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
d_model (`int`, *optional*, defaults to 256): Dimensionality of the layers and the pooler layer. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_start_token_id (`int`, *optional*, defaults to 2): The initial token ID of the decoder when decoding sequences. scale_embedding (`bool`, *optional*, defaults to `True`): Whether the embeddings are scaled by the square root of `d_model`. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): The id of the beginning-of-sequence token. eos_token_id (`int`, *optional*, defaults to 2): The id of the end-of-sequence token. max_source_positions (`int`, *optional*, defaults to 6000): The maximum sequence length of log-mel filter-bank features that this model might ever be used with. max_target_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). num_conv_layers (`int`, *optional*, defaults to 2): Number of 1D convolutional layers in the conv module. conv_kernel_sizes (`Tuple[int]`, *optional*, defaults to `(5, 5)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. The length of `conv_kernel_sizes` has to match `num_conv_layers`. conv_channels (`int`, *optional*, defaults to 1024): An integer defining the number of output channels of each convolution layers except the final one in the conv module. input_feat_per_channel (`int`, *optional*, defaults to 80): An integer specifying the size of feature vector. This is also the dimensions of log-mel filter-bank features. input_channels (`int`, *optional*, defaults to 1): An integer specifying number of input channels of the input feature vector. 
Example: ```python >>> from transformers import Speech2TextConfig, Speech2TextModel >>> # Initializing a Speech2Text s2t_transformer_s style configuration >>> configuration = Speech2TextConfig() >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration >>> model = Speech2TextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "speech_to_text" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = list(conv_kernel_sizes) self.conv_channels = conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels if len(self.conv_kernel_sizes) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` " f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." ) super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
transformers/src/transformers/models/speech_to_text/configuration_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/configuration_speech_to_text.py", "repo_id": "transformers", "token_count": 3953 }
344
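As a small illustration of the constraint enforced at the end of `Speech2TextConfig.__init__` (not taken from the file above; the three-layer setup is an arbitrary example), the kernel-size tuple has to have one entry per convolutional layer:

```python
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration

# Arbitrary example: three convolutional subsampler layers need three kernel sizes.
config = Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5, 5))
model = Speech2TextForConditionalGeneration(config)  # randomly initialized weights

# A mismatch fails fast at configuration time with a ValueError.
try:
    Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5, 5))
except ValueError as err:
    print(err)
```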
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for SpeechT5.""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class SpeechT5FeatureExtractor(SequenceFeatureExtractor): r""" Constructs a SpeechT5 feature extractor. This class can pre-process a raw speech signal by (optionally) normalizing to zero-mean unit-variance, for use by the SpeechT5 speech encoder prenet. This class can also extract log-mel filter bank features from raw speech, for use by the SpeechT5 speech decoder prenet. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values. do_normalize (`bool`, *optional*, defaults to `False`): Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly improve the performance for some models. num_mel_bins (`int`, *optional*, defaults to 80): The number of mel-frequency bins in the extracted spectrogram features. hop_length (`int`, *optional*, defaults to 16): Number of ms between windows. Otherwise referred to as "shift" in many papers. win_length (`int`, *optional*, defaults to 64): Number of ms per window. win_function (`str`, *optional*, defaults to `"hann_window"`): Name for the window function used for windowing, must be accessible via `torch.{win_function}` frame_signal_scale (`float`, *optional*, defaults to 1.0): Constant multiplied in creating the frames before applying DFT. This argument is deprecated. fmin (`float`, *optional*, defaults to 80): Minimum mel frequency in Hz. fmax (`float`, *optional*, defaults to 7600): Maximum mel frequency in Hz. mel_floor (`float`, *optional*, defaults to 1e-10): Minimum value of mel frequency banks. reduction_factor (`int`, *optional*, defaults to 2): Spectrogram length reduction factor. This argument is deprecated. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether or not [`~SpeechT5FeatureExtractor.__call__`] should return `attention_mask`. 
""" model_input_names = ["input_values", "attention_mask"] def __init__( self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80, fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.do_normalize = do_normalize self.return_attention_mask = return_attention_mask self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.frame_signal_scale = frame_signal_scale self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.reduction_factor = reduction_factor self.sample_size = win_length * sampling_rate // 1000 self.sample_stride = hop_length * sampling_rate // 1000 self.n_fft = optimal_fft_length(self.sample_size) self.n_freqs = (self.n_fft // 2) + 1 self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True) self.mel_filters = mel_filter_bank( num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def zero_mean_unit_var_norm( input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0 ) -> List[np.ndarray]: """ Every array in the list is normalized to have zero mean and unit variance """ if attention_mask is not None: attention_mask = np.array(attention_mask, np.int32) normed_input_values = [] for vector, length in zip(input_values, attention_mask.sum(-1)): normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: normed_slice[length:] = padding_value normed_input_values.append(normed_slice) else: normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def _extract_mel_features( self, one_waveform: np.ndarray, ) -> np.ndarray: """ Extracts log-mel filterbank features for one waveform array (unbatched). 
""" log_mel_spec = spectrogram( one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", ) return log_mel_spec.T def __call__( self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Pass in a value for `audio` to extract waveform features. Pass in a value for `audio_target` to extract log-mel spectrogram features. Args: audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`, *optional*): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. This outputs waveform features. Must be mono channel audio, not stereo, i.e. single float per timestep. audio_target (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`, *optional*): The sequence or batch of sequences to be processed as targets. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. This outputs log-mel spectrogram features. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` or `audio_target` input was sampled. 
It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values.") if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: inputs = self._process_audio( audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, ) else: inputs = None if audio_target is not None: inputs_target = self._process_audio( audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, ) if inputs is None: return inputs_target else: inputs["labels"] = inputs_target["input_values"] decoder_attention_mask = inputs_target.get("attention_mask") if decoder_attention_mask is not None: inputs["decoder_attention_mask"] = decoder_attention_mask return inputs def _process_audio( self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1 if is_batched_numpy and len(speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list))) ) if is_batched: speech = [np.asarray(speech, dtype=np.float32) for speech in speech] elif not is_batched and not isinstance(speech, np.ndarray): speech = np.asarray(speech, dtype=np.float32) elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64): speech = speech.astype(np.float32) # always return batch if not is_batched: speech = [speech] # needed to make pad() work on spectrogram inputs feature_size_hack = self.feature_size # convert into correct format for padding if is_target: features = [self._extract_mel_features(waveform) for waveform in speech] encoded_inputs = BatchFeature({"input_values": features}) self.feature_size = self.num_mel_bins else: encoded_inputs = BatchFeature({"input_values": speech}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, ) self.feature_size = feature_size_hack # convert input values to correct format input_values = padded_inputs["input_values"] if not isinstance(input_values[0], np.ndarray): padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values] elif ( not isinstance(input_values, np.ndarray) and isinstance(input_values[0], np.ndarray) and input_values[0].dtype is np.dtype(np.float64) ): padded_inputs["input_values"] = [array.astype(np.float32) for array in 
input_values] elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64): padded_inputs["input_values"] = input_values.astype(np.float32) # convert attention_mask to correct format attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: attention_mask = ( attention_mask if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD else None ) padded_inputs["input_values"] = self.zero_mean_unit_var_norm( padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs def to_dict(self) -> Dict[str, Any]: output = super().to_dict() # Don't serialize these as they are derived from the other properties. names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output
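
For orientation, here is a minimal usage sketch of the feature extractor defined above. It is not part of the original file: the default-constructed extractor (16 kHz audio, 80 mel bins) and the random waveform are assumptions made purely for illustration; real use would typically load the extractor with `from_pretrained`.

```python
import numpy as np

from transformers import SpeechT5FeatureExtractor

# Default-constructed extractor; assumption for this sketch only.
feature_extractor = SpeechT5FeatureExtractor()

waveform = np.random.randn(16000).astype(np.float32)  # 1 second of dummy mono audio

# `audio` produces padded waveforms ("input_values"); `audio_target` produces
# log-mel spectrogram features. Passing both returns "input_values" plus "labels".
inputs = feature_extractor(
    audio=waveform,
    audio_target=waveform,
    sampling_rate=16000,
    return_tensors="pt",
)
print(inputs["input_values"].shape)  # (1, 16000)
print(inputs["labels"].shape)        # (1, num_frames, num_mel_bins)
```

Passing only `audio` yields waveform `input_values`; passing only `audio_target` yields spectrogram `input_values` that can be used directly as decoder targets.
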
transformers/src/transformers/models/speecht5/feature_extraction_speecht5.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/feature_extraction_speecht5.py", "repo_id": "transformers", "token_count": 7605 }
345
# coding=utf-8 # Copyright 2024 Stability AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ StableLM model configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "stabilityai/stablelm-3b-4e1t": "https://huggingface.co/stabilityai/stablelm-3b-4e1t/resolve/main/config.json", # See all StableLM models at https://huggingface.co/models?filter=stablelm } class StableLmConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~StableLmModel`]. It is used to instantiate a StableLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the StableLM [stabilityai/stablelm-3b-4e1t](https://huggingface.co/stabilityai/stablelm-3b-4e1t) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50304): Vocabulary size of the StableLM model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`StableLmModel`]. intermediate_size (`int`, *optional*, defaults to 6912): Dimension of the MLP representations. hidden_size (`int`, *optional*, defaults to 2560): Dimension of the hidden representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 32): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string). max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to `10000.0`): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. use_qkv_bias (`bool`, *optional*, defaults to `False`): Whether or not the model should use bias for qkv layers. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio after applying the MLP to the hidden states. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. partial_rotary_factor (`float`, *optional*, defaults to 0.25): Percentage of the query and keys which will have rotary embedding. bos_token_id (int, *optional*, defaults to 0): The id of the `BOS` token in the vocabulary. eos_token_id (int, *optional*, defaults to 0): The id of the `EOS` token in the vocabulary. Example: ```python >>> from transformers import StableLmModel, StableLmConfig >>> # Initializing a StableLM stablelm-3b style configuration >>> configuration = StableLmConfig() ```""" model_type = "stablelm" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50304, intermediate_size=6912, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, hidden_act="silu", max_position_embeddings=4096, initializer_range=0.02, layer_norm_eps=1.0e-5, use_cache=True, tie_word_embeddings=False, rope_theta=10_000, rope_scaling=None, use_qkv_bias=False, hidden_dropout=0.0, attention_dropout=0.0, partial_rotary_factor=0.25, bos_token_id=0, eos_token_id=0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.use_qkv_bias = use_qkv_bias self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.partial_rotary_factor = partial_rotary_factor self._rope_scaling_validation() super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. 
""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, " f"got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_factor = self.rope_scaling.get("factor", None) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
transformers/src/transformers/models/stablelm/configuration_stablelm.py/0
{ "file_path": "transformers/src/transformers/models/stablelm/configuration_stablelm.py", "repo_id": "transformers", "token_count": 3547 }
346
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin SimMIM checkpoints from the original repository. URL: https://github.com/microsoft/Swin-Transformer/blob/main/MODELHUB.md#simmim-pretrained-swin-v1-models""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def get_swin_config(model_name): config = SwinConfig(image_size=192) if "base" in model_name: window_size = 6 embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) elif "large" in model_name: window_size = 12 embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants") config.window_size = window_size config.embed_dim = embed_dim config.depths = depths config.num_heads = num_heads return config def rename_key(name): if "encoder.mask_token" in name: name = name.replace("encoder.mask_token", "embeddings.mask_token") if "encoder.patch_embed.proj" in name: name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection") if "encoder.patch_embed.norm" in name: name = name.replace("encoder.patch_embed.norm", "embeddings.norm") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if name == "encoder.norm.weight": name = "layernorm.weight" if name == "encoder.norm.bias": name = "layernorm.bias" if "decoder" in name: pass else: name = "swin." 
+ name    return name def convert_state_dict(orig_state_dict, model):    for key in orig_state_dict.copy().keys():        val = orig_state_dict.pop(key)        if "attn_mask" in key:            pass        elif "qkv" in key:            key_split = key.split(".")            layer_num = int(key_split[2])            block_num = int(key_split[4])            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size            if "weight" in key:                orig_state_dict[                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"                ] = val[:dim, :]                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[                    dim : dim * 2, :                ]                orig_state_dict[                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"                ] = val[-dim:, :]            else:                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[                    :dim                ]                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[                    dim : dim * 2                ]                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[                    -dim:                ]        else:            orig_state_dict[rename_key(key)] = val    return orig_state_dict def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]    config = get_swin_config(model_name)    model = SwinForMaskedImageModeling(config)    model.eval()    new_state_dict = convert_state_dict(state_dict, model)    model.load_state_dict(new_state_dict)    url = "http://images.cocodataset.org/val2017/000000039769.jpg"    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})    image = Image.open(requests.get(url, stream=True).raw)    inputs = image_processor(images=image, return_tensors="pt")    with torch.no_grad():        outputs = model(**inputs).logits    print(outputs.shape)    print("Looks ok!")    if pytorch_dump_folder_path is not None:        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")        model.save_pretrained(pytorch_dump_folder_path)        print(f"Saving image processor to {pytorch_dump_folder_path}")        image_processor.save_pretrained(pytorch_dump_folder_path)    if push_to_hub:        print(f"Pushing model and image processor for {model_name} to hub")        model.push_to_hub(f"microsoft/{model_name}")        image_processor.push_to_hub(f"microsoft/{model_name}") if __name__ == "__main__":    parser = argparse.ArgumentParser()    # Required parameters    parser.add_argument(        "--model_name",        default="swin-base-simmim-window6-192",        type=str,        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],        help="Name of the Swin SimMIM model you'd like to convert.",    )    parser.add_argument(        "--checkpoint_path",        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",        type=str,        help="Path to the original PyTorch checkpoint (.pth file).",    )    parser.add_argument(        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."    )    parser.add_argument(        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."    )    args = parser.parse_args()    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
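
To make the key mapping concrete, here is a small illustrative sketch of what `rename_key` produces for a few representative SimMIM checkpoint keys. The input keys are examples only, and `rename_key` is assumed to be imported from (or run inside) this conversion script:

```python
# Illustrative original SimMIM checkpoint keys; the expected outputs follow
# from the substitution rules in `rename_key` above.
examples = [
    "encoder.patch_embed.proj.weight",
    "encoder.layers.0.blocks.0.norm1.weight",
    "encoder.norm.bias",
]
for key in examples:
    print(key, "->", rename_key(key))
# encoder.patch_embed.proj.weight -> swin.embeddings.patch_embeddings.projection.weight
# encoder.layers.0.blocks.0.norm1.weight -> swin.encoder.layers.0.blocks.0.layernorm_before.weight
# encoder.norm.bias -> swin.layernorm.bias
```
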
transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py", "repo_id": "transformers", "token_count": 2896 }
347
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SwitchTransformersX checkpoints from the original repository to JAX/FLAX model.""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from t5x import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument MOE_LAYER_NAME_MAPPING = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def rename_keys(s_dict): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model keys = list(s_dict.keys()) for key in keys: layer_to_block_of_layer = r".*/layers_(\d+)" new_key = key if re.match(layer_to_block_of_layer, key): new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key) layer_to_block_of_layer = r"(encoder|decoder)\/" if re.match(layer_to_block_of_layer, key): groups = re.match(layer_to_block_of_layer, new_key).groups() if groups[0] == "encoder": new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key) elif groups[0] == "decoder": new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key) new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: new_key = new_key.replace(old_key, temp_key) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer    for key in list(s_dict.keys()):        if "expert" in key:            num_experts = s_dict[key].shape[0]            expert_weights = s_dict[key]            for idx in range(num_experts):                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")            s_dict.pop(key)    return s_dict GIN_TO_CONFIG_MAPPING = {    "NUM_ENCODER_LAYERS": "num_layers",    "NUM_DECODER_LAYERS": "num_decoder_layers",    "NUM_HEADS": "num_heads",    "HEAD_DIM": "d_kv",    "EMBED_DIM": "d_model",    "MLP_DIM": "d_ff",    "NUM_SELECTED_EXPERTS": "num_selected_experts",    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",    "dense.MlpBlock.activations": "feed_forward_proj", } def convert_gin_to_config(gin_file, num_experts):    # Convert a Google-style gin config to the Hugging Face format    import regex as re    with open(gin_file, "r") as f:        raw_gin = f.read()    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)    args = {}    for param, value in regex_match:        if param in GIN_TO_CONFIG_MAPPING and value != "":            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])    args["num_experts"] = num_experts    config = SwitchTransformersConfig(**args)    return config def convert_flax_checkpoint_to_pytorch(    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8 ):    # Initialise PyTorch model    print(f"Loading flax weights from: {flax_checkpoint_path}")    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)    if gin_file is not None:        config = convert_gin_to_config(gin_file, num_experts)    else:        config = SwitchTransformersConfig.from_pretrained(config_file)    pt_model = SwitchTransformersForConditionalGeneration(config)    flax_params = flax_params["target"]    flax_params = flatten_dict(flax_params, sep="/")    flax_params = rename_keys(flax_params)    flax_params = unflatten_dict(flax_params, sep="/")    # Load the flax params in the PT model    load_flax_weights_in_pytorch_model(pt_model, flax_params)    print(f"Save PyTorch model to {pytorch_dump_path}")    pt_model.save_pretrained(pytorch_dump_path) if __name__ == "__main__":    parser = argparse.ArgumentParser()    # Required parameters    parser.add_argument(        "--switch_t5x_checkpoint_path",        default=None,        type=str,        required=True,        help="Path to the pretrained T5X SwitchTransformers checkpoint that should be converted.",    )    parser.add_argument(        "--gin_file",        default=None,        type=str,        required=False,        help="Path to the gin config file. If not provided, a `config_name` has to be passed.",    )    parser.add_argument(        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."    )    parser.add_argument(        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."    )    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")    args = parser.parse_args()    convert_flax_checkpoint_to_pytorch(        args.switch_t5x_checkpoint_path,        args.config_name,        args.gin_file,        args.pytorch_dump_folder_path,        args.num_experts,    )
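
As a rough sketch of how `convert_gin_to_config` reads a gin file, the snippet below writes a tiny, made-up gin file and converts it. It is not part of the original script: all hyperparameter values are illustrative, and it assumes the functions above are in scope (for example, run inside this script) and that the `regex` package is installed.

```python
# A made-up, minimal gin file containing only hyperparameters that
# GIN_TO_CONFIG_MAPPING knows about; every value here is illustrative.
gin_text = """\
NUM_ENCODER_LAYERS = 12
NUM_DECODER_LAYERS = 12
NUM_HEADS = 12
HEAD_DIM = 64
EMBED_DIM = 768
MLP_DIM = 2048
dense.MlpBlock.activations = ('gelu',)
"""

with open("tiny_switch.gin", "w") as f:
    f.write(gin_text)

# Maps the gin names to SwitchTransformersConfig arguments and adds `num_experts`.
config = convert_gin_to_config("tiny_switch.gin", num_experts=8)
print(config.num_layers, config.d_model, config.num_experts)
```
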
transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3257 }
348
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration for Backbone models""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class TimmBackboneConfig(PretrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`]. It is used to instantiate a timm backbone model according to the specified arguments, defining the model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone (`str`, *optional*): The timm checkpoint to load. num_channels (`int`, *optional*, defaults to 3): The number of input channels. features_only (`bool`, *optional*, defaults to `True`): Whether to output only the features or also the logits. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use a pretrained backbone. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). Will default to the last stage if unset. freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`): Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. Example: ```python >>> from transformers import TimmBackboneConfig, TimmBackbone >>> # Initializing a timm backbone >>> configuration = TimmBackboneConfig("resnet50") >>> # Initializing a model from the configuration >>> model = TimmBackbone(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "timm_backbone" def __init__( self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs, ): super().__init__(**kwargs) self.backbone = backbone self.num_channels = num_channels self.features_only = features_only self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else (-1,) self.freeze_batch_norm_2d = freeze_batch_norm_2d
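
A minimal usage sketch for this configuration follows. It is not part of the original file: it assumes the `timm` package is installed, uses `"resnet18"` purely as an illustrative backbone name, and disables pretrained weights so nothing is downloaded. `out_indices` selects which backbone stages are returned as feature maps.

```python
import torch

from transformers import TimmBackbone, TimmBackboneConfig

# Illustrative configuration; "resnet18" and the indices are assumptions for this sketch.
config = TimmBackboneConfig(
    backbone="resnet18",
    out_indices=(1, 2, 3, 4),
    use_pretrained_backbone=False,  # skip downloading weights for this sketch
)
backbone = TimmBackbone(config)

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)

for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one feature map per entry in `out_indices`
```
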
transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py/0
{ "file_path": "transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py", "repo_id": "transformers", "token_count": 1106 }
349
# coding=utf-8 # Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TVP Model""" import math from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import prune_linear_layer from ...utils import logging from ...utils.backbone_utils import load_backbone from .configuration_tvp import TvpConfig logger = logging.get_logger(__name__) TVP_PRETRAINED_MODEL_ARCHIVE_LIST = [    "Intel/tvp-base",    "Intel/tvp-base-ANet",    # See all Tvp models at https://huggingface.co/models?filter=tvp ] @dataclass class TvpVideoGroundingOutput(ModelOutput):    """    Args:        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):            Temporal-Distance IoU loss for video grounding.        logits (`torch.FloatTensor` of shape `(batch_size, 2)`):            Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the            input texts.        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of            the model at the output of each layer plus the optional initial embedding outputs.        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,            sequence_length)`.    """    loss: Optional[torch.FloatTensor] = None    logits: torch.FloatTensor = None    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class TvpLoss(nn.Module):    """    This class computes the losses for `TvpForVideoGrounding`. It compares the predicted start/end times with the    ground-truth time slot of the video and combines an IoU loss, a mid-point distance loss and a duration loss.    Args:        losses (`List[str]`):            List of all the losses to be applied.    """    def __init__(self, losses):        super().__init__()        self.loss_map = {            "iou": self.loss_iou,            "distance": self.loss_distance,            "duration": self.loss_duration,        }        for loss in losses:            if loss not in self.loss_map:                raise ValueError(f"Loss {loss} not supported")        self.losses = losses    def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):        """        Measure the intersection over union.
""" inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time) union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time) iou = 1 - inter.clamp(min=0) / union return iou def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): """ Measure the distance of mid points. """ mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0) mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0) distance_diff = torch.div( torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration ).clamp(min=0.2) return distance_diff def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): """ Measure the difference of duration. """ duration_candidates = torch.sub(candidates_end_time, candidates_start_time) duration_groundtruth = torch.sub(end_time, start_time) duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration)) duration_diff = duration_diff.clamp(min=0.4) return duration_diff def forward(self, logits, labels): """ This performs the loss computation. Args: logits (`torch.FloatTensor`): The output logits of head module. labels (`List[torch.FloatTensor]`): List of tensors ([start, end, duration]), which contains start time, end time of the video corresponding to the text, and also the duration. """ duration, start_time, end_time = labels candidates = torch.mul(logits, duration) candidates_start_time, candidates_end_time = candidates[:, 0].float(), candidates[:, 1].float() losses_dict = {} for loss in self.losses: losses_dict.update( {loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)} ) return losses_dict class TvpVisionModel(nn.Module): def __init__(self, config): super().__init__() self.backbone = load_backbone(config) self.grid_encoder_conv = nn.Conv2d( config.backbone_config.hidden_sizes[-1], config.hidden_size, kernel_size=3, stride=1, padding=1, groups=1, bias=False, ) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape # (batch_size * num_frames, num_channels, height, width) pixel_values = pixel_values.view(batch_size * num_frames, num_channels, height, width) grid_feat_outputs = self.backbone(pixel_values)["feature_maps"][0] grid = self.grid_encoder_conv(grid_feat_outputs) grid = nn.functional.max_pool2d(grid, kernel_size=2, stride=2) grid = nn.functional.relu(grid, inplace=True) new_channel, new_height, new_width = grid.shape[-3:] # (batch_size, num_frames, num_channels, height, width) grid = grid.view(batch_size, num_frames, new_channel, new_height, new_width) # (batch_size, num_frames, height, width, num_channels) grid = grid.permute(0, 1, 3, 4, 2) return grid class TvpVisualInputEmbedding(nn.Module): """ Takes input of both image and video (multi-frame) """ def __init__(self, config): super().__init__() # sequence embedding self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size) self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(1, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def 
add_2d_positional_embeddings(self, grid): """ Args: grid: (batch_size, height, width, hidden_dim) Returns: grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim) """ batch_size, height, width, hidden_dim = grid.shape # add row-wise position embeddings row_position_ids = torch.arange(height, dtype=torch.long, device=grid.device) # (height, ) row_position_embeddings = self.row_position_embeddings(row_position_ids) # (height, hidden_dim) row_shape = (1,) * (len(grid.shape) - 3) + (height, 1, hidden_dim) # (1, height, 1, hidden_dim) grid = grid + row_position_embeddings.view(*row_shape) # broadcast automatically # add column-wise position embeddings col_position_ids = torch.arange(width, dtype=torch.long, device=grid.device) # (width, ) col_position_embeddings = self.col_position_embeddings(col_position_ids) # (width, hidden_dim) col_shape = (batch_size, 1, width, hidden_dim) # (1, 1, width, hidden_dim) return grid + col_position_embeddings.view(*col_shape) # broadcast automatically def forward(self, grid): """ Args: grid: Array of shape (batch_size, num_frames, height, width, num_channels). It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. Note, num_frames can be 1 Returns: embeddings: The embedding of grid with size (batch_size, height*width, num_channels) """ batch_size, num_frames, height, width, num_channels = grid.shape # temporal mean pooling, (batch_size, height, width, hidden_size) grid = grid.mean(1) grid = self.add_2d_positional_embeddings(grid) # image token sequence, (batch_size, height*width, num_channels) visual_tokens = grid.view(batch_size, -1, num_channels) visual_tokens_shape = visual_tokens.shape[:-1] device = visual_tokens.device # image token type embeddings. 
token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = visual_tokens + token_type_embeddings embeddings = self.layer_norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TvpTextInputEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.layer_norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TvpAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attn_dropout = nn.Dropout(config.attention_probs_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.num_attention_heads, self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) # Update hyper params 
and store pruned heads self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int): return ( tensor.view(batch_size, sequence_length, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool] = None, ): batch_size, sequence_length = hidden_states.shape[:2] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self._reshape(mixed_query_layer, sequence_length, batch_size) key_layer = self._reshape(mixed_key_layer, sequence_length, batch_size) value_layer = self._reshape(mixed_value_layer, sequence_length, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attn_dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask attn_output = torch.matmul(attention_probs, value_layer) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, sequence_length, self.all_head_size) attn_output = self.dense(attn_output) attn_output = self.dropout(attn_output) attn_output = self.layer_norm(attn_output + hidden_states) # add attentions if we output them outputs = (attn_output, attention_probs) if output_attentions else (attn_output,) return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Tvp class TvpIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TvpOutputLayer(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.layer_norm(hidden_states + input_tensor) return hidden_states class TvpEncodeLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = TvpAttention(config) self.intermediate = TvpIntermediate(config) self.output = TvpOutputLayer(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions: Optional[bool] 
= None, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class TvpEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TvpEncodeLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): return_dict = return_dict if return_dict is not None else self.config.return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, (head_mask[i] if head_mask is not None else None), output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states if output_hidden_states else None, attentions=all_attentions if output_attentions else None, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Tvp class TvpPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class TvpPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = TvpConfig base_model_prefix = "model" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) TVP_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TvpConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TVP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`TvpImageProcessor`]. See [`TvpImageProcessor.__call__`] for details. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class TvpFrameDownPadPrompter(nn.Module): """ Pad frames extracted from videos only at the bottom. 
""" def __init__(self, config): if config.visual_prompter_apply not in ("add", "replace", "remove"): raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") super().__init__() self.visual_prompt_size = config.visual_prompt_size self.frame_num = config.frame_num self.max_img_size = config.max_img_size self.visual_prompter_apply = config.visual_prompter_apply self.pad_down = nn.Parameter( torch.randn([1, config.frame_num, 3, config.visual_prompt_size, config.max_img_size]) ) def forward(self, pixel_values): if self.visual_prompter_apply != "add": visual_prompt_mask = torch.ones( [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device ) visual_prompt_mask[self.max_img_size - self.visual_prompt_size : self.max_img_size, :] = 0.0 pixel_values *= visual_prompt_mask if self.visual_prompter_apply != "remove": prompt = torch.zeros( [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size], device=pixel_values.device, ) start_point = self.max_img_size - self.visual_prompt_size prompt[:, :, :, start_point : self.max_img_size, :] = self.pad_down pixel_values += prompt.to(pixel_values.dtype) return pixel_values class TvpFramePadPrompter(nn.Module): """ Pad frames extracted from videos in the surroundings. """ def __init__(self, config): if config.visual_prompter_apply not in ("add", "replace", "remove"): raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") super().__init__() self.num_frames = config.num_frames self.max_img_size = config.max_img_size self.visual_prompter_apply = config.visual_prompter_apply self.base_size = config.max_img_size - config.visual_prompt_size * 2 self.pad_up = nn.Parameter( torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) ) self.pad_down = nn.Parameter( torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) ) self.pad_left = nn.Parameter( torch.randn( [ 1, config.num_frames, 3, config.max_img_size - config.visual_prompt_size * 2, config.visual_prompt_size, ] ) ) self.pad_right = nn.Parameter( torch.randn( [ 1, config.num_frames, 3, config.max_img_size - config.visual_prompt_size * 2, config.visual_prompt_size, ] ) ) def forward(self, pixel_values): if self.visual_prompter_apply not in ("add", "remove", "replace"): raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}") if self.visual_prompter_apply in ("replace", "remove"): visual_prompt_mask = torch.ones( [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device ) pixel_values *= visual_prompt_mask if self.visual_prompter_apply in ("replace", "add"): base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device) prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4) prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3) prompt = torch.cat(pixel_values.size(0) * [prompt]) pixel_values = pixel_values + prompt.to(pixel_values.dtype) return pixel_values TVP_PROMPTER_CLASSES_MAPPING = { "framedownpad": TvpFrameDownPadPrompter, "framepad": TvpFramePadPrompter, } @add_start_docstrings( "The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on" " top.", TVP_START_DOCSTRING, ) class TvpModel(TvpPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.vision_model = TvpVisionModel(config) self.embeddings = TvpTextInputEmbeddings(config) 
self.visual_embeddings = TvpVisualInputEmbedding(config) self.encoder = TvpEncoder(config) self.pooler = TvpPooler(config) self.text_prompt = nn.Parameter(torch.randn([1, 10, config.hidden_size])) self.dropout = nn.Dropout(config.hidden_dropout_prob) if config.visual_prompter_type not in TVP_PROMPTER_CLASSES_MAPPING: raise ValueError("`visual_prompter_type` must be in (framedownpad, framepad)") self.visual_prompter = TVP_PROMPTER_CLASSES_MAPPING[config.visual_prompter_type](config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=TvpConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Returns: Examples: ```python >>> import torch >>> from transformers import AutoConfig, AutoTokenizer, TvpModel >>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp") >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") >>> pixel_values = torch.rand(1, 1, 3, 448, 448) >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict # Add visual prompt, it compensates for the spatiotemporal information loss in 2D visual features. pixel_values = self.vision_model(self.visual_prompter(pixel_values)) # (batch_size, sequence_length, hidden_size) text_embedding_output = self.embeddings(input_ids=input_ids) # (batch_size, visual_sequence_length, hidden_size) visual_embedding_output = self.visual_embeddings(pixel_values) if attention_mask is not None: # (batch_size, visual_sequence_length) visual_attention_mask = attention_mask.new_ones(visual_embedding_output.shape[:2]) pt_mask = torch.ones(attention_mask.shape[0], 10).to( device=attention_mask.device, dtype=attention_mask.dtype ) attention_mask = torch.cat([pt_mask, attention_mask, visual_attention_mask], dim=-1) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size()).to(input_ids.device) text_prompt = self.text_prompt.expand(text_embedding_output.shape[0], -1, -1) # (batch_size, sequence_length + visual_sequence_length, hidden_size) embedding_output = torch.cat([text_prompt, text_embedding_output, visual_embedding_output], dim=1) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=self.get_head_mask(head_mask, self.config.num_hidden_layers), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) last_hidden_state = self.dropout(last_hidden_state) pooled_output = self.dropout(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TvpVideoGroundingHead(nn.Module): def __init__(self, config): super().__init__() self.layer_0 = nn.Linear(config.hidden_size, config.hidden_size * 2) self.layer_1 = nn.Linear(config.hidden_size * 2, 2) self.activation_0 = nn.ReLU() self.activation_1 = nn.Sigmoid() def forward(self, pooler_output): logits = self.activation_0(self.layer_0(pooler_output)) logits = self.activation_1(self.layer_1(logits)) return logits @add_start_docstrings( """ Tvp Model with a video grounding head on top computing IoU, distance, and duration loss. """, TVP_START_DOCSTRING, ) class TvpForVideoGrounding(TvpPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.model = TvpModel(config) self.video_grounding_head = TvpVideoGroundingHead(config) self.post_init() @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TvpVideoGroundingOutput, config_class=TvpConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, labels: Tuple[torch.Tensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*): The labels contains duration, start time, and end time of the video corresponding to the text. 
Returns: Examples: ```python >>> import torch >>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding >>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp") >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") >>> pixel_values = torch.rand(1, 1, 3, 448, 448) >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict outputs = self.model( input_ids, pixel_values, attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooler_output = outputs[1] logits = self.video_grounding_head(pooler_output) loss = None if labels is not None: criterion = TvpLoss(["iou", "distance", "duration"]) criterion.to(self.device) loss_dict = criterion(logits, labels) loss = ( loss_dict["iou"] + self.config.distance_loss_weight * loss_dict["distance"] + self.config.duration_loss_weight * loss_dict["duration"] ) if not return_dict: outputs = (logits,) + outputs[2:] if loss is not None: outputs = (loss,) + outputs return outputs return TvpVideoGroundingOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
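
Building on the docstring example above, the following sketch shows how `labels` might be supplied so that `TvpForVideoGrounding` computes the combined IoU/distance/duration loss. It is not part of the original file: the time values are purely illustrative, and the tiny random checkpoint is the one already used in the docstrings.

```python
import torch

from transformers import AutoTokenizer, TvpForVideoGrounding

model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")

pixel_values = torch.rand(1, 1, 3, 448, 448)
text_inputs = tokenizer("This is an example input", return_tensors="pt")

# `labels` is unpacked as (duration, start_time, end_time); all values are in
# seconds and purely illustrative here.
duration = torch.tensor([30.0])
start_time = torch.tensor([5.0])
end_time = torch.tensor([12.5])

outputs = model(
    text_inputs.input_ids,
    pixel_values,
    text_inputs.attention_mask,
    labels=(duration, start_time, end_time),
)
print(outputs.logits)  # (batch_size, 2): predicted start/end as fractions of the duration
print(outputs.loss)    # weighted sum of the IoU, distance and duration losses
```
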
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch UniSpeech model.""" import math import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_unispeech import UniSpeechConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "UniSpeechConfig" # Base docstring _CHECKPOINT_FOR_DOC = "patrickvonplaten/unispeech-large-1500h-cv-timit" _EXPECTED_OUTPUT_SHAPE = [1, 292, 1024] # CTC docstring _CTC_EXPECTED_OUTPUT = "'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'" _CTC_EXPECTED_LOSS = 17.17 UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/unispeech-large-1500h-cv", "microsoft/unispeech-large-multi-lingual-1500h-cv", # See all UniSpeech models at https://huggingface.co/models?filter=unispeech ] @dataclass class UniSpeechForPreTrainingOutput(ModelOutput): """ Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None projected_states: torch.FloatTensor = None projected_quantized_states: torch.FloatTensor = None codevector_perplexity: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeech class UniSpeechNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeech class UniSpeechLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeech class UniSpeechGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = 
ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeech class UniSpeechPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeech class UniSpeechSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeech class UniSpeechFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [ UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func( conv_layer.__call__, hidden_states, ) else: hidden_states = conv_layer(hidden_states) return 
hidden_states class UniSpeechFeatureExtractor(UniSpeechFeatureEncoder): def __init__(self, config): super().__init__(config) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. " f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeech class UniSpeechFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeech class UniSpeechAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[UniSpeechConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = 
self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech class UniSpeechFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech class UniSpeechEncoderLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = UniSpeechAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->UniSpeech class UniSpeechAttnAdapterLayer(nn.Module): def __init__(self, config): """ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. 
""" super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech class UniSpeechEncoderLayerStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.attention = UniSpeechAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = UniSpeechFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, "adapter_attn_dim", None) is not None: self.adapter_layer = UniSpeechAttnAdapterLayer(config) else: self.adapter_layer = None def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech class UniSpeechEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = 
self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech class UniSpeechEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer( 
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class UniSpeechGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`" f" {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax( hidden_states.float(), tau=self.temperature, hard=True ).type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity class UniSpeechPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = UniSpeechConfig base_model_prefix = "unispeech" main_input_name = "input_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, UniSpeechGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, UniSpeechPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, UniSpeechFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask UNISPEECH_START_DOCSTRING = r""" UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UNISPEECH_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) <Tip warning={true}> `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. </Tip> output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.", UNISPEECH_START_DOCSTRING, ) class UniSpeechModel(UniSpeechPreTrainedModel): def __init__(self, config: UniSpeechConfig): super().__init__(config) self.config = config self.feature_extractor = UniSpeechFeatureEncoder(config) self.feature_projection = UniSpeechFeatureProjection(config) if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = UniSpeechEncoderStableLayerNorm(config) else: self.encoder = UniSpeechEncoder(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """UniSpeech Model with a vector-quantization module and ctc loss for pre-training.""", UNISPEECH_START_DOCSTRING ) class 
UniSpeechForPreTraining(UniSpeechPreTrainedModel): def __init__(self, config: UniSpeechConfig): super().__init__(config) self.unispeech = UniSpeechModel(config) self.dropout_features = nn.Dropout(config.feat_quantizer_dropout) self.quantizer = UniSpeechGumbelVectorQuantizer(config) self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim) self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size) self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes) self.dropout = nn.Dropout(config.final_dropout) # Initialize weights and apply final processing self.post_init() def set_gumbel_temperature(self, temperature: int): """ Set the Gumbel softmax temperature to a given value. Only necessary for training """ self.quantizer.temperature = temperature def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech.feature_extractor._freeze_parameters() @staticmethod def compute_contrastive_logits( target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int = 1, ): """ Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. """ target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1) logits = logits.type_as(target_features) # apply temperature logits = logits / temperature return logits @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, UniSpeechForPreTrainingOutput]: r""" mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*): Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss. Required input for pre-training. 
Returns: Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv") >>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv") >>> # TODO: Add full pretraining example ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) transformer_features = outputs[0] # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1]) quantized_features, codevector_perplexity = self.quantizer(extract_features) # project quantized features twice quantized_features = self.project_q(quantized_features) quantized_features = self.project_hid(quantized_features) prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_( self.config.replace_prob ) prob_replace_matrix = prob_replace_matrix.transpose(0, 1) sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device) sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1) sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1) logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + ( quantized_features.masked_fill(~sampled_replace_matrix, 0.0) ) # project to ctc units logits = self.dropout(logits) logits = self.ctc_proj(logits) # TODO(PVP) - add negative sampling & loss computation loss = None if not return_dict: if loss is not None: return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return UniSpeechForPreTrainingOutput( loss=loss, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", UNISPEECH_START_DOCSTRING, """ target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechForCTC`] with adapters. Uses 'eng' by default. """, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH class UniSpeechForCTC(UniSpeechPreTrainedModel): def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.unispeech = UniSpeechModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." 
) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to # correctly load adapter layers for UniSpeech so that we do not have to introduce a new API to # [`PreTrainedModel`]. While slightly hacky, UniSpeech never has to tie input and output embeddings, so that it is # ok to repurpose this function here. target_lang = self.target_lang if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None: raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.") elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.unispeech.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.unispeech( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """, UNISPEECH_START_DOCSTRING, ) class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)" ) self.unispeech = UniSpeechModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_extractor def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->unispeech def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. 
""" self.unispeech.feature_extractor._freeze_parameters() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_base_model with wav2vec2->unispeech def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.unispeech.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->UniSpeech, wav2vec2->unispeech def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.unispeech( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/unispeech/modeling_unispeech.py/0
{ "file_path": "transformers/src/transformers/models/unispeech/modeling_unispeech.py", "repo_id": "transformers", "token_count": 30778 }
351
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_videomae"] = [ "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", "VideoMAEForPreTraining", "VideoMAEModel", "VideoMAEPreTrainedModel", "VideoMAEForVideoClassification", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"] _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"] if TYPE_CHECKING: from .configuration_videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_videomae import ( VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, VideoMAEPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_videomae import VideoMAEFeatureExtractor from .image_processing_videomae import VideoMAEImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
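# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file): once torch is
# installed, the lazy import structure above exposes these classes at the top
# level of `transformers`. The shapes follow the VideoMAEConfig defaults
# (16 frames, 3 channels, 224x224 pixels); the random tensor and num_labels=5
# are toy stand-ins for a real preprocessed clip and label set.
import torch

from transformers import VideoMAEConfig, VideoMAEForVideoClassification

config = VideoMAEConfig(num_labels=5)
model = VideoMAEForVideoClassification(config).eval()

pixel_values = torch.randn(1, 16, 3, 224, 224)  # (batch, frames, channels, height, width)
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits

print(logits.shape)  # torch.Size([1, 5])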
transformers/src/transformers/models/videomae/__init__.py/0
{ "file_path": "transformers/src/transformers/models/videomae/__init__.py", "repo_id": "transformers", "token_count": 943 }
352
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT MAE checkpoints from the original repository: https://github.com/facebookresearch/mae""" import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def rename_key(name): if "cls_token" in name: name = name.replace("cls_token", "vit.embeddings.cls_token") if "mask_token" in name: name = name.replace("mask_token", "decoder.mask_token") if "decoder_pos_embed" in name: name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed") if "pos_embed" in name and "decoder" not in name: name = name.replace("pos_embed", "vit.embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "vit.embeddings.norm") if "decoder_blocks" in name: name = name.replace("decoder_blocks", "decoder.decoder_layers") if "blocks" in name: name = name.replace("blocks", "vit.encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "decoder_embed" in name: name = name.replace("decoder_embed", "decoder.decoder_embed") if "decoder_norm" in name: name = name.replace("decoder_norm", "decoder.decoder_norm") if "decoder_pred" in name: name = name.replace("decoder_pred", "decoder.decoder_pred") if "norm.weight" in name and "decoder" not in name: name = name.replace("norm.weight", "vit.layernorm.weight") if "norm.bias" in name and "decoder" not in name: name = name.replace("norm.bias", "vit.layernorm.bias") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[1]) if "decoder_blocks" in key: dim = config.decoder_hidden_size prefix = "decoder.decoder_layers." if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] elif "bias" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:] else: dim = config.hidden_size prefix = "vit.encoder.layer." 
if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] elif "bias" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:] else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path): config = ViTMAEConfig() if "large" in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 elif "huge" in checkpoint_url: config.patch_size = 14 config.hidden_size = 1280 config.intermediate_size = 5120 config.num_hidden_layers = 32 config.num_attention_heads = 16 model = ViTMAEForPreTraining(config) state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] image_processor = ViTMAEImageProcessor(size=config.image_size) new_state_dict = convert_state_dict(state_dict, config) model.load_state_dict(new_state_dict) model.eval() url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg" image = Image.open(requests.get(url, stream=True).raw) image_processor = ViTMAEImageProcessor(size=config.image_size) inputs = image_processor(images=image, return_tensors="pt") # forward pass torch.manual_seed(2) outputs = model(**inputs) logits = outputs.logits if "large" in checkpoint_url: expected_slice = torch.tensor( [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] ) elif "huge" in checkpoint_url: expected_slice = torch.tensor( [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] ) else: expected_slice = torch.tensor( [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] ) # verify logits assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
transformers/src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py", "repo_id": "transformers", "token_count": 3304 }
353
# coding=utf-8 # Copyright 2023 The Kakao Enterprise Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VITS model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) VITS_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/mms-tts-eng": "https://huggingface.co/facebook/mms-tts-eng/resolve/main/config.json", } class VitsConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VitsModel`]. It is used to instantiate a VITS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VITS [facebook/mms-tts-eng](https://huggingface.co/facebook/mms-tts-eng) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 38): Vocabulary size of the VITS model. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method of [`VitsModel`]. hidden_size (`int`, *optional*, defaults to 192): Dimensionality of the text encoder layers. num_hidden_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 2): Number of attention heads for each attention layer in the Transformer encoder. window_size (`int`, *optional*, defaults to 4): Window size for the relative positional embeddings in the attention layers of the Transformer encoder. use_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the key, query, value projection layers in the Transformer encoder. ffn_dim (`int`, *optional*, defaults to 768): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. ffn_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the 1D convolution layers used by the feed-forward network in the Transformer encoder. flow_size (`int`, *optional*, defaults to 192): Dimensionality of the flow layers. spectrogram_bins (`int`, *optional*, defaults to 513): Number of frequency bins in the target spectrogram. hidden_act (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings and encoder. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. 
activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. use_stochastic_duration_prediction (`bool`, *optional*, defaults to `True`): Whether to use the stochastic duration prediction module or the regular duration predictor. num_speakers (`int`, *optional*, defaults to 1): Number of speakers if this is a multi-speaker model. speaker_embedding_size (`int`, *optional*, defaults to 0): Number of channels used by the speaker embeddings. Is zero for single-speaker models. upsample_initial_channel (`int`, *optional*, defaults to 512): The number of input channels into the HiFi-GAN upsampling network. upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`): A tuple of integers defining the stride of each 1D convolutional layer in the HiFi-GAN upsampling network. The length of `upsample_rates` defines the number of convolutional layers and has to match the length of `upsample_kernel_sizes`. upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`): A tuple of integers defining the kernel size of each 1D convolutional layer in the HiFi-GAN upsampling network. The length of `upsample_kernel_sizes` defines the number of convolutional layers and has to match the length of `upsample_rates`. resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the HiFi-GAN multi-receptive field fusion (MRF) module. resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the HiFi-GAN multi-receptive field fusion (MRF) module. leaky_relu_slope (`float`, *optional*, defaults to 0.1): The angle of the negative slope used by the leaky ReLU activation. depth_separable_channels (`int`, *optional*, defaults to 2): Number of channels to use in each depth-separable block. depth_separable_num_layers (`int`, *optional*, defaults to 3): Number of convolutional layers to use in each depth-separable block. duration_predictor_flow_bins (`int`, *optional*, defaults to 10): Number of channels to map using the unonstrained rational spline in the duration predictor model. duration_predictor_tail_bound (`float`, *optional*, defaults to 5.0): Value of the tail bin boundary when computing the unconstrained rational spline in the duration predictor model. duration_predictor_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the 1D convolution layers used in the duration predictor model. duration_predictor_dropout (`float`, *optional*, defaults to 0.5): The dropout ratio for the duration predictor model. duration_predictor_num_flows (`int`, *optional*, defaults to 4): Number of flow stages used by the duration predictor model. duration_predictor_filter_channels (`int`, *optional*, defaults to 256): Number of channels for the convolution layers used in the duration predictor model. prior_encoder_num_flows (`int`, *optional*, defaults to 4): Number of flow stages used by the prior encoder flow model. 
prior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 4): Number of WaveNet layers used by the prior encoder flow model. posterior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 16): Number of WaveNet layers used by the posterior encoder model. wavenet_kernel_size (`int`, *optional*, defaults to 5): Kernel size of the 1D convolution layers used in the WaveNet model. wavenet_dilation_rate (`int`, *optional*, defaults to 1): Dilation rates of the dilated 1D convolutional layers used in the WaveNet model. wavenet_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the WaveNet layers. speaking_rate (`float`, *optional*, defaults to 1.0): Speaking rate. Larger values give faster synthesised speech. noise_scale (`float`, *optional*, defaults to 0.667): How random the speech prediction is. Larger values create more variation in the predicted speech. noise_scale_duration (`float`, *optional*, defaults to 0.8): How random the duration prediction is. Larger values create more variation in the predicted durations. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the output audio waveform is digitalized expressed in hertz (Hz). Example: ```python >>> from transformers import VitsModel, VitsConfig >>> # Initializing a "facebook/mms-tts-eng" style configuration >>> configuration = VitsConfig() >>> # Initializing a model (with random weights) from the "facebook/mms-tts-eng" style configuration >>> model = VitsModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vits" def __init__( self, vocab_size=38, hidden_size=192, num_hidden_layers=6, num_attention_heads=2, window_size=4, use_bias=True, ffn_dim=768, layerdrop=0.1, ffn_kernel_size=3, flow_size=192, spectrogram_bins=513, hidden_act="relu", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-5, use_stochastic_duration_prediction=True, num_speakers=1, speaker_embedding_size=0, upsample_initial_channel=512, upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_slope=0.1, depth_separable_channels=2, depth_separable_num_layers=3, duration_predictor_flow_bins=10, duration_predictor_tail_bound=5.0, duration_predictor_kernel_size=3, duration_predictor_dropout=0.5, duration_predictor_num_flows=4, duration_predictor_filter_channels=256, prior_encoder_num_flows=4, prior_encoder_num_wavenet_layers=4, posterior_encoder_num_wavenet_layers=16, wavenet_kernel_size=5, wavenet_dilation_rate=1, wavenet_dropout=0.0, speaking_rate=1.0, noise_scale=0.667, noise_scale_duration=0.8, sampling_rate=16_000, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.window_size = window_size self.use_bias = use_bias self.ffn_dim = ffn_dim self.layerdrop = layerdrop self.ffn_kernel_size = ffn_kernel_size self.flow_size = flow_size self.spectrogram_bins = spectrogram_bins self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_stochastic_duration_prediction = use_stochastic_duration_prediction self.num_speakers = num_speakers self.speaker_embedding_size = speaker_embedding_size 
self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.leaky_relu_slope = leaky_relu_slope self.depth_separable_channels = depth_separable_channels self.depth_separable_num_layers = depth_separable_num_layers self.duration_predictor_flow_bins = duration_predictor_flow_bins self.duration_predictor_tail_bound = duration_predictor_tail_bound self.duration_predictor_kernel_size = duration_predictor_kernel_size self.duration_predictor_dropout = duration_predictor_dropout self.duration_predictor_num_flows = duration_predictor_num_flows self.duration_predictor_filter_channels = duration_predictor_filter_channels self.prior_encoder_num_flows = prior_encoder_num_flows self.prior_encoder_num_wavenet_layers = prior_encoder_num_wavenet_layers self.posterior_encoder_num_wavenet_layers = posterior_encoder_num_wavenet_layers self.wavenet_kernel_size = wavenet_kernel_size self.wavenet_dilation_rate = wavenet_dilation_rate self.wavenet_dropout = wavenet_dropout self.speaking_rate = speaking_rate self.noise_scale = noise_scale self.noise_scale_duration = noise_scale_duration self.sampling_rate = sampling_rate if len(upsample_kernel_sizes) != len(upsample_rates): raise ValueError( f"The length of `upsample_kernel_sizes` ({len(upsample_kernel_sizes)}) must match the length of " f"`upsample_rates` ({len(upsample_rates)})" ) super().__init__(**kwargs)
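# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream file): with the defaults above,
# the product of `upsample_rates` (8 * 8 * 2 * 2 = 256) is the number of
# waveform samples the HiFi-GAN decoder generates per input frame, and the
# constructor rejects rate/kernel-size lists of different lengths.
import math

from transformers import VitsConfig

config = VitsConfig()
print(math.prod(config.upsample_rates))  # 256

try:
    VitsConfig(upsample_rates=[8, 8, 2], upsample_kernel_sizes=[16, 16, 4, 4])
except ValueError as err:
    print(err)  # lengths of `upsample_kernel_sizes` and `upsample_rates` must match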
transformers/src/transformers/models/vits/configuration_vits.py/0
{ "file_path": "transformers/src/transformers/models/vits/configuration_vits.py", "repo_id": "transformers", "token_count": 5431 }
354
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Wav2Vec2 model.""" import math import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_1_13 from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_file, is_peft_available, is_safetensors_available, logging, replace_return_docstrings, ) from .configuration_wav2vec2 import Wav2Vec2Config WAV2VEC2_ADAPTER_PT_FILE = "adapter.{}.bin" WAV2VEC2_ADAPTER_SAFE_FILE = "adapter.{}.safetensors" if is_safetensors_available(): from safetensors.torch import load_file as safe_load_file logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "Wav2Vec2Config" # Base docstring _CHECKPOINT_FOR_DOC = "facebook/wav2vec2-base-960h" _EXPECTED_OUTPUT_SHAPE = [1, 292, 768] # CTC docstring _CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'" _CTC_EXPECTED_LOSS = 53.48 # Audio class docstring _SEQ_CLASS_CHECKPOINT = "superb/wav2vec2-base-superb-ks" _SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'" _SEQ_CLASS_EXPECTED_LOSS = 6.54 # Frame class docstring _FRAME_CLASS_CHECKPOINT = "anton-l/wav2vec2-base-superb-sd" _FRAME_EXPECTED_OUTPUT = [0, 0] # Speaker Verification docstring _XVECTOR_CHECKPOINT = "anton-l/wav2vec2-base-superb-sv" _XVECTOR_EXPECTED_OUTPUT = 0.98 WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/wav2vec2-base-960h", "facebook/wav2vec2-large-960h", "facebook/wav2vec2-large-960h-lv60", "facebook/wav2vec2-large-960h-lv60-self", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 ] @dataclass class Wav2Vec2ForPreTrainingOutput(ModelOutput): """ Output type of [`Wav2Vec2ForPreTraining`], with potential hidden states and attentions. Args: loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. 
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`): The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`): The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . """ loss: Optional[torch.FloatTensor] = None projected_states: torch.FloatTensor = None projected_quantized_states: torch.FloatTensor = None codevector_perplexity: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None contrastive_loss: Optional[torch.FloatTensor] = None diversity_loss: Optional[torch.FloatTensor] = None def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask def _sample_negative_indices( features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None ): """ Sample `num_negatives` vectors from feature vectors. 
""" batch_size, sequence_length = features_shape # generate indices of the positive vectors themselves, repeat them `num_negatives` times sequence_length_range = np.arange(sequence_length) # get `num_negatives` random vector indices from the same utterance sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32) mask_time_indices = ( mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool) ) for batch_idx in range(batch_size): high = mask_time_indices[batch_idx].sum() - 1 mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]] feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives)) sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives)) # avoid sampling the same positive vector, but keep the distribution uniform sampled_indices[sampled_indices >= feature_indices] += 1 # remap to actual indices sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices] # correct for batch size sampled_negative_indices[batch_idx] += batch_idx * sequence_length return sampled_negative_indices class Wav2Vec2NoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class Wav2Vec2LayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states class Wav2Vec2GroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class Wav2Vec2PositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, 
padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = Wav2Vec2SamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class Wav2Vec2SamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states class Wav2Vec2FeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [ Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ Wav2Vec2LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func( conv_layer.__call__, hidden_states, ) else: hidden_states = conv_layer(hidden_states) return hidden_states class Wav2Vec2FeatureExtractor(Wav2Vec2FeatureEncoder): def __init__(self, config): super().__init__(config) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. 
" f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) class Wav2Vec2FeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Wav2Vec2 class Wav2Vec2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[Wav2Vec2Config] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class Wav2Vec2FeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class Wav2Vec2EncoderLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = Wav2Vec2Attention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = Wav2Vec2FeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Wav2Vec2EncoderLayerStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.attention = Wav2Vec2Attention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = Wav2Vec2FeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, "adapter_attn_dim", None) is not None: self.adapter_layer = Wav2Vec2AttnAdapterLayer(config) else: self.adapter_layer = None def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Wav2Vec2Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = 
config self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([Wav2Vec2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class Wav2Vec2EncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 # extend attention_mask attention_mask = 1.0 - 
attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class Wav2Vec2GumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. 
""" def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible " f"by `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = mask.flatten()[:, None, None].expand(probs.shape) probs = torch.where(mask_extended, probs, torch.zeros_like(probs)) marginal_probs = probs.sum(dim=0) / mask.sum() else: marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states, mask_time_indices=None): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax( hidden_states.float(), tau=self.temperature, hard=True ).type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs, mask_time_indices) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity class Wav2Vec2Adapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size) else: self.proj = self.proj_layer_norm = None self.layers = nn.ModuleList(Wav2Vec2AdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop def forward(self, hidden_states): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2) for layer in self.layers: layerdrop_prob = np.random.random() if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer(hidden_states) 
hidden_states = hidden_states.transpose(1, 2) return hidden_states class Wav2Vec2AdapterLayer(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.output_hidden_size, 2 * config.output_hidden_size, config.adapter_kernel_size, stride=config.adapter_stride, padding=1, ) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=1) return hidden_states class Wav2Vec2AttnAdapterLayer(nn.Module): def __init__(self, config): """ Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed up training throughput. """ super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class Wav2Vec2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2Config base_model_prefix = "wav2vec2" main_input_name = "input_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" # Wav2Vec2ForPreTraining last 2 linear layers need standard Linear init. if isinstance(module, Wav2Vec2ForPreTraining): module.project_hid.reset_parameters() module.project_q.reset_parameters() module.project_hid._is_hf_initialized = True module.project_q._is_hf_initialized = True # gumbel softmax requires special init elif isinstance(module, Wav2Vec2GumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, Wav2Vec2PositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, Wav2Vec2FeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in 
zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask def _get_adapters(self): if self.config.adapter_attn_dim is None: raise ValueError(f"{self.__class__} has no adapter layers. Make sure to define `config.adapter_attn_dim`.") adapter_weights = {} for name, module in self.named_modules(): if isinstance(module, Wav2Vec2AttnAdapterLayer): for param_name, param in module.named_parameters(): adapter_weights[".".join([name, param_name])] = param if isinstance(self, Wav2Vec2ForCTC): for name, param in self.lm_head.named_parameters(): adapter_weights[".".join(["lm_head", name])] = param return adapter_weights def init_adapter_layers(self): """ (Re-)initialize attention adapter layers and lm head for adapter-only fine-tuning """ # init attention adapters for module in self.modules(): if isinstance(module, Wav2Vec2AttnAdapterLayer): self._init_weights(module) # init lm head if isinstance(self, Wav2Vec2ForCTC): self._init_weights(self.lm_head) def load_adapter(self, target_lang: str, force_load=True, **kwargs): r""" Load a language adapter model from a pre-trained adapter model. Parameters: target_lang (`str`): Has to be a language id of an existing adapter weight. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin force_load (`bool`, defaults to `True`): Whether the weights shall be loaded even if `target_lang` matches `self.target_lang`. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. 
If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```python >>> from transformers import Wav2Vec2ForCTC, AutoProcessor >>> ckpt = "facebook/mms-1b-all" >>> processor = AutoProcessor.from_pretrained(ckpt) >>> model = Wav2Vec2ForCTC.from_pretrained(ckpt, target_lang="eng") >>> # set specific language >>> processor.tokenizer.set_target_lang("spa") >>> model.load_adapter("spa") ``` """ if self.config.adapter_attn_dim is None: raise ValueError(f"Cannot load_adapter for {target_lang} if `config.adapter_attn_dim` is not defined.") if target_lang == self.target_lang and not force_load: logger.warning(f"Adapter weights are already set to {target_lang}.") return cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token model_path_or_id = self.config._name_or_path state_dict = None # 1. Let's first try loading a safetensors adapter weight if use_safetensors is not False: filepath = WAV2VEC2_ADAPTER_SAFE_FILE.format(target_lang) try: weight_path = cached_file( model_path_or_id, filename=filepath, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, cache_dir=cache_dir, ) state_dict = safe_load_file(weight_path) except EnvironmentError: if use_safetensors: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted # to the original exception. raise except Exception: # For any other exception, we throw a generic error. if use_safetensors: raise EnvironmentError( f"Can't load the model for '{model_path_or_id}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{model_path_or_id}' is the correct path to a" f" directory containing a file named {filepath}." ) # 2. 
If this didn't work, let's try loading a PyTorch adapter weight if state_dict is None: filepath = WAV2VEC2_ADAPTER_PT_FILE.format(target_lang) try: weight_path = cached_file( model_path_or_id, filename=filepath, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, cache_dir=cache_dir, ) weights_only_kwarg = {"weights_only": True} if is_torch_greater_or_equal_than_1_13 else {} state_dict = torch.load( weight_path, map_location="cpu", **weights_only_kwarg, ) except EnvironmentError: # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted # to the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the model for '{model_path_or_id}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{model_path_or_id}' is the correct path to a" f" directory containing a file named {filepath}." ) adapter_weights = self._get_adapters() unexpected_keys = set(state_dict.keys()) - set(adapter_weights.keys()) missing_keys = set(adapter_weights.keys()) - set(state_dict.keys()) if len(unexpected_keys) > 0: raise ValueError(f"The adapter weights {weight_path} have unexpected keys: {', '.join(unexpected_keys)}.") elif len(missing_keys) > 0: raise ValueError(f"The adapter weights {weight_path} are missing keys: {', '.join(missing_keys)}.") # make sure the vocab size is now correct target_vocab_size = state_dict["lm_head.weight"].shape[0] if target_vocab_size != self.config.vocab_size: self.lm_head = nn.Linear( self.config.output_hidden_size, target_vocab_size, device=self.device, dtype=self.dtype ) self.config.vocab_size = target_vocab_size # cast the loaded adapter weights to the precision and device of the existing adapter weights before overwriting them state_dict = {k: v.to(adapter_weights[k]) for k, v in state_dict.items()} self.load_state_dict(state_dict, strict=False) # set target language correctly self.target_lang = target_lang WAV_2_VEC_2_START_DOCSTRING = r""" Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WAV_2_VEC_2_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) <Tip warning={true}> `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. </Tip> output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV_2_VEC_2_START_DOCSTRING, ) class Wav2Vec2Model(Wav2Vec2PreTrainedModel): def __init__(self, config: Wav2Vec2Config): super().__init__(config) self.config = config self.feature_extractor = Wav2Vec2FeatureEncoder(config) self.feature_projection = Wav2Vec2FeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = Wav2Vec2EncoderStableLayerNorm(config) else: self.encoder = Wav2Vec2Encoder(config) self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.feature_extractor._freeze_parameters() def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", 
WAV_2_VEC_2_START_DOCSTRING) class Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel): def __init__(self, config: Wav2Vec2Config): super().__init__(config) self.wav2vec2 = Wav2Vec2Model(config) self.dropout_features = nn.Dropout(config.feat_quantizer_dropout) self.quantizer = Wav2Vec2GumbelVectorQuantizer(config) self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim) self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim) # Initialize weights and apply final processing self.post_init() def set_gumbel_temperature(self, temperature: int): """ Set the Gumbel softmax temperature to a given value. Only necessary for training """ self.quantizer.temperature = temperature def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor._freeze_parameters() @staticmethod def compute_contrastive_logits( target_features: torch.FloatTensor, negative_features: torch.FloatTensor, predicted_features: torch.FloatTensor, temperature: int = 0.1, ): """ Compute logits for contrastive loss based using cosine similarity as the distance measure between `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied. """ target_features = torch.cat([target_features, negative_features], dim=0) logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1).type_as( target_features ) # apply temperature logits = logits / temperature return logits @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Wav2Vec2ForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.BoolTensor] = None, sampled_negative_indices: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2ForPreTrainingOutput]: r""" mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*): Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss. Required input for pre-training. 
Returns: Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, Wav2Vec2ForPreTraining >>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") >>> model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values # Batch size 1 >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item() >>> mask_time_indices = _compute_mask_indices( ... shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2 ... ) >>> sampled_negative_indices = _sample_negative_indices( ... features_shape=(batch_size, sequence_length), ... num_negatives=model.config.num_negatives, ... mask_time_indices=mask_time_indices, ... ) >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long) >>> sampled_negative_indices = torch.tensor( ... data=sampled_negative_indices, device=input_values.device, dtype=torch.long ... ) >>> with torch.no_grad(): ... outputs = model(input_values, mask_time_indices=mask_time_indices) >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) >>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) >>> # show that cosine similarity is much higher than random >>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5 tensor(True) >>> # for contrastive loss training model should be put into train mode >>> model = model.train() >>> loss = model( ... input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices ... ).loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if mask_time_indices is not None: mask_time_indices = mask_time_indices.to(torch.bool) outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, return_dict=return_dict, ) # 1. project all transformed features (including masked) to final vq dim transformer_features = self.project_hid(outputs[0]) # 2. quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1]) if attention_mask is not None: # compute reduced attention_mask correponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) quantized_features, codevector_perplexity = self.quantizer( extract_features, mask_time_indices=mask_time_indices ) quantized_features = self.project_q(quantized_features) loss = contrastive_loss = diversity_loss = None if sampled_negative_indices is not None: batch_size, sequence_length, hidden_size = quantized_features.shape # for training, we sample negatives # 3. 
sample K negatives (distractors) quantized states for contrastive loss # if attention_mask is passed, make sure that padded feature vectors cannot be sampled # sample negative quantized vectors BTC => (BxT)C negative_quantized_features = quantized_features.view(-1, hidden_size)[ sampled_negative_indices.long().view(-1) ] negative_quantized_features = negative_quantized_features.view( batch_size, sequence_length, -1, hidden_size ).permute(2, 0, 1, 3) # 4. compute logits, corresponding to `logs = sim(c_t, [q_t, \sim{q}_t]) / \kappa` # of equation (3) in https://arxiv.org/pdf/2006.11477.pdf logits = self.compute_contrastive_logits( quantized_features[None, :], negative_quantized_features, transformer_features, self.config.contrastive_logits_temperature, ) # 5. if a negative vector is identical to the positive (i.e. when codebook utilization is low), # its cosine similarity will be masked neg_is_pos = (quantized_features == negative_quantized_features).all(-1) if neg_is_pos.any(): logits[1:][neg_is_pos] = float("-inf") # 6. compute contrastive loss \mathbf{L}_m = cross_entropy(logs) = # -log(exp(sim(c_t, q_t)/\kappa) / \sum_{\sim{q}} exp(sim(c_t, \sim{q})/\kappa)) logits = logits.transpose(0, 2).reshape(-1, logits.size(0)) target = ((1 - mask_time_indices.long()) * -100).transpose(0, 1).flatten() contrastive_loss = nn.functional.cross_entropy(logits.float(), target, reduction="sum") # 7. compute diversity loss: \mathbf{L}_d num_codevectors = self.config.num_codevectors_per_group * self.config.num_codevector_groups diversity_loss = ((num_codevectors - codevector_perplexity) / num_codevectors) * mask_time_indices.sum() # 8. \mathbf{L} = \mathbf{L}_m + \alpha * \mathbf{L}_d loss = contrastive_loss + self.config.diversity_loss_weight * diversity_loss if not return_dict: if loss is not None: return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return Wav2Vec2ForPreTrainingOutput( loss=loss, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, contrastive_loss=contrastive_loss, diversity_loss=diversity_loss, ) @add_start_docstrings("""Wav2Vec2 Model with a `language modeling` head on top.""", WAV_2_VEC_2_START_DOCSTRING) class Wav2Vec2ForMaskedLM(Wav2Vec2PreTrainedModel): def __init__(self, config): super().__init__(config) warnings.warn( "The class `Wav2Vec2ForMaskedLM` is deprecated. 
Please use `Wav2Vec2ForCTC` instead.", FutureWarning ) self.wav2vec2 = Wav2Vec2Model(config) self.dropout = nn.Dropout(config.final_dropout) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) def forward( self, input_values: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) if not return_dict: output = (logits,) + outputs[2:] return output return MaskedLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings( """Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", WAV_2_VEC_2_START_DOCSTRING, """ target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`Wav2Vec2ForCTC`] with adapters. Uses 'eng' by default. """, ) class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel): def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.wav2vec2 = Wav2Vec2Model(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to # correctly load adapter layers for Wav2Vec2 so that we do not have to introduce a new API to # [`PreTrainedModel`]. While slightly hacky, Wav2Vec2 never has to tie input and output embeddings, so that it is # ok to repurpose this function here. 
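# Illustrative flow (assuming an MMS-style checkpoint such as "facebook/mms-1b-all", as in the
# `load_adapter` docstring above): `Wav2Vec2ForCTC.from_pretrained(ckpt, target_lang="spa")` stores the
# requested language on the instance, `from_pretrained` then calls `tie_weights()`, and the code below
# triggers `self.load_adapter("spa", force_load=True)` so that the Spanish adapter weights and the
# matching lm_head are loaded on top of the shared base model.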
target_lang = self.target_lang if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None: raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.") elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """, WAV_2_VEC_2_START_DOCSTRING, ) class Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)" ) self.wav2vec2 = Wav2Vec2Model(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.wav2vec2.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_SEQ_CLASS_CHECKPOINT, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization. """, WAV_2_VEC_2_START_DOCSTRING, ) class Wav2Vec2ForAudioFrameClassification(Wav2Vec2PreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)" ) self.wav2vec2 = Wav2Vec2Model(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. 
""" warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_FRAME_CLASS_CHECKPOINT, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_FRAME_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss class 
TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if is_peft_available(): from peft.tuners.lora import LoraLayer if isinstance(self.kernel, LoraLayer): warnings.warn( "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. " "You should exclude TDNNLayer from LoRA's target modules.", ) # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up hidden_states = hidden_states.transpose(1, 2) weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2) hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.activation(hidden_states) return hidden_states @add_start_docstrings( """ Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification. """, WAV_2_VEC_2_START_DOCSTRING, ) class Wav2Vec2ForXVector(Wav2Vec2PreTrainedModel): def __init__(self, config): super().__init__(config) self.wav2vec2 = Wav2Vec2Model(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wav2vec2.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.wav2vec2.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_XVECTOR_CHECKPOINT, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_XVECTOR_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/wav2vec2/modeling_wav2vec2.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2/modeling_wav2vec2.py", "repo_id": "transformers", "token_count": 46050 }
355
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Whisper.""" import json import os import re import warnings from functools import lru_cache from typing import List, Optional, Tuple import numpy as np from tokenizers import AddedToken, pre_tokenizers, processors from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .english_normalizer import BasicTextNormalizer, EnglishTextNormalizer from .tokenization_whisper import LANGUAGES, TASK_IDS, TO_LANGUAGE_CODE, WhisperTokenizer, _decode_asr logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "tokenizer_file": "tokenizer.json", "merges_file": "merges.txt", "normalizer_file": "normalizer.json", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "openai/whisper-tiny": "https://huggingface.co/openai/whisper-tiny/resolve/main/vocab.json", "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/vocab.json", "openai/whisper-small": "https://huggingface.co/openai/whisper-small/resolve/main/vocab.json", "openai/whisper-medium": "https://huggingface.co/openai/whisper-medium/resolve/main/vocab.json", "openai/whisper-large": "https://huggingface.co/openai/whisper-large/resolve/main/vocab.json", "openai/whisper-tiny.en": "https://huggingface.co/openai/whisper-tiny.en/resolve/main/vocab.json", "openai/whisper-base.en": "https://huggingface.co/openai/whisper-base.en/resolve/main/vocab.json", "openai/whisper-small.en": "https://huggingface.co/openai/whisper-small.en/resolve/main/vocab.json", "openai/whisper-medium.en": "https://huggingface.co/openai/whisper-medium.en/resolve/main/vocab.json", }, "merges_file": { "openai/whisper-tiny": "https://huggingface.co/openai/whisper-tiny/resolve/main/merges.txt", "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/merges.txt", "openai/whisper-small": "https://huggingface.co/openai/whisper-small/resolve/main/merges.txt", "openai/whisper-medium": "https://huggingface.co/openai/whisper-medium/resolve/main/merges.txt", "openai/whisper-large": "https://huggingface.co/openai/whisper-large/resolve/main/merges.txt", "openai/whisper-tiny.en": "https://huggingface.co/openai/whisper-tiny.en/resolve/main/merges.txt", "openai/whisper-base.en": "https://huggingface.co/openai/whisper-base.en/resolve/main/merges.txt", "openai/whisper-small.en": "https://huggingface.co/openai/whisper-small.en/resolve/main/merges.txt", "openai/whisper-medium.en": "https://huggingface.co/openai/whisper-medium.en/resolve/main/merges.txt", }, "tokenizer_file": { "openai/whisper-tiny": "https://huggingface.co/openai/whisper-tiny/resolve/main/tokenizer.json", "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/tokenizer.json", "openai/whisper-small": "https://huggingface.co/openai/whisper-small/resolve/main/tokenizer.json", "openai/whisper-medium": 
"https://huggingface.co/openai/whisper-medium/resolve/main/tokenizer.json", "openai/whisper-large": "https://huggingface.co/openai/whisper-large/resolve/main/tokenizer.json", "openai/whisper-tiny.en": "https://huggingface.co/openai/whisper-tiny.en/resolve/main/tokenizer.json", "openai/whisper-base.en": "https://huggingface.co/openai/whisper-base.en/resolve/main/tokenizer.json", "openai/whisper-small.en": "https://huggingface.co/openai/whisper-small.en/resolve/main/tokenizer.json", "openai/whisper-medium.en": "https://huggingface.co/openai/whisper-medium.en/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "openai/whisper-tiny": 1500, "openai/whisper-base": 1500, "openai/whisper-small": 1500, "openai/whisper-medium": 1500, "openai/whisper-large": 1500, "openai/whisper-tiny.en": 1500, "openai/whisper-base.en": 1500, "openai/whisper-small.en": 1500, "openai/whisper-medium.en": 1500, } class WhisperTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" Whisper tokenizer (backed by HuggingFace's *tokenizers* library). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. merges_file (`str`, *optional*): Path to the merges file. normalizer_file (`str`, *optional*): Path to the normalizer_file file. tokenizer_file (`str`, *optional*): Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The beginning of sequence token. The `decoder_start_token_id` is used to set the first token as `"<|startoftranscript|>"` when generating. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (Whisper tokenizer detect beginning of words by the preceding space). language (`str`, *optional*): The language of the transcription text. The corresponding language id token is appended to the start of the sequence for multilingual speech recognition and speech translation tasks, e.g. for Spanish the token `"<|es|>"` is appended to the start of sequence. This should be used for multilingual fine-tuning only. task (`str`, *optional*): Task identifier to append at the start of sequence (if any). This should be used for mulitlingual fine-tuning, with `"transcribe"` for speech recognition and `"translate"` for speech translation. predict_timestamps (`bool`, *optional*, defaults to `False`): Whether to omit the `<|notimestamps|>` token at the start of the sequence. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = WhisperTokenizer def __init__( self, vocab_file=None, merges_file=None, normalizer_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, language=None, task=None, predict_timestamps=False, **kwargs, ): bos_token = ( AddedToken(bos_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(bos_token, str) else bos_token ) eos_token = ( AddedToken(eos_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(eos_token, str) else eos_token ) unk_token = ( AddedToken(unk_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(unk_token, str) else unk_token ) super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, ) self.add_bos_token = kwargs.pop("add_bos_token", False) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) if normalizer_file is not None: with open(normalizer_file, encoding="utf-8") as vocab_handle: self.english_spelling_normalizer = json.load(vocab_handle) else: self.english_spelling_normalizer = None self.add_prefix_space = add_prefix_space self.timestamp_pat = re.compile(r"<\|(\d+\.\d+)\|>") self.language = language self.task = task self.predict_timestamps = predict_timestamps # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._batch_encode_plus def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._encode_plus def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*args, **kwargs) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._decode_with_timestamps def _decode_with_timestamps(self, token_ids, skip_special_tokens=False, time_precision=0.02) -> str: """ Timestamp tokens are above the special tokens' id range and are ignored by `decode()`. This method decodes given tokens with timestamps tokens annotated, e.g. "<|1.08|>". 
""" timestamp_begin = self.all_special_ids[-1] + 1 outputs = [[]] cur_max_timestamp = 0.0 prev_segments_len = 0.0 for token in token_ids: if token >= timestamp_begin: timestamp = float((token - timestamp_begin) * time_precision) if timestamp < cur_max_timestamp: # next segment has started prev_segments_len += cur_max_timestamp cur_max_timestamp = timestamp outputs.append(f"<|{(timestamp + prev_segments_len):.2f}|>") outputs.append([]) else: outputs[-1].append(token) outputs = [ s if isinstance(s, str) else self.decode(s, skip_special_tokens=skip_special_tokens) for s in outputs ] return "".join(outputs) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._compute_offsets def _compute_offsets(self, token_ids, time_precision=0.02): """ Compute offsets for a given tokenized input Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. time_precision (`float`, `optional`, defaults to 0.02): The time ratio to convert from token to time. """ offsets = [] # ensure torch tensor of token ids is placed on cpu if "torch" in str(type(token_ids)) and (hasattr(token_ids, "cpu") and callable(token_ids.cpu)): token_ids = token_ids.cpu() token_ids = np.array(token_ids) if token_ids.shape[0] > 1 and len(token_ids.shape) > 1: raise ValueError("Can only process a single input at a time") timestamp_begin = self.all_special_ids[-1] + 1 timestamp_tokens = token_ids >= timestamp_begin consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1: # either there are no timestamps or there are no consecutive ones return [] elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive: # we add the final timestamp if it is not already in the list consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1) last_slice = np.where(timestamp_tokens)[0][0] for current_slice in consecutive: sliced_tokens = token_ids[last_slice:current_slice] if len(sliced_tokens) > 1: start_timestamp_position = sliced_tokens[0].item() - timestamp_begin end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin # strip timestamp tokens from the text output sliced_tokens = self._preprocess_token_ids(sliced_tokens) text = self._decode(sliced_tokens) text = self._filter_timestamp_ids(text) offsets.append( { "text": text, "timestamp": ( start_timestamp_position * time_precision, end_timestamp_position * time_precision, ), } ) last_slice = current_slice return offsets @lru_cache # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.timestamp_ids def timestamp_ids(self, time_precision=0.02): """ Compute the timestamp token ids for a given precision and save to least-recently used (LRU) cache. Args: time_precision (`float`, `optional`, defaults to 0.02): The time ratio to convert from token to time. """ return self.convert_tokens_to_ids([("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)]) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._preprocess_token_ids def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool = False): """ Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer. 
skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be removed. """ if skip_special_tokens: prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>") decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>") token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id) return token_ids # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._filter_timestamp_ids def _filter_timestamp_ids(self, token_ids): return re.sub(self.timestamp_pat, "", token_ids) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.decode def decode( self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, output_offsets: bool = False, time_precision: float = 0.02, decode_with_timestamps: bool = False, normalize: bool = False, basic_normalize: bool = False, remove_diacritics: bool = False, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). output_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output the offsets of the tokens. This should only be set if the model predicted timestamps. time_precision (`float`, `optional`, defaults to 0.02): The time ratio to convert from token to time. decode_with_timestamps (`bool`, *optional*, defaults to `False`): Whether or not to decode with timestamps included in the raw text. normalize (`bool`, *optional*, defaults to `False`): Whether or not to apply the English text normalizer to the decoded text. Only applicable when the target text is in English. Otherwise, the basic text normalizer should be applied. basic_normalize (`bool`, *optional*, defaults to `False`): Whether or not to apply the Basic text normalizer to the decoded text. Applicable to multilingual target text. remove_diacritics (`bool`, *optional*, defaults to `False`): Whether or not to remove diacritics when applying the Basic text normalizer. Removing diacritics may destroy information in the decoded text, hence it should be used with caution. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. 
""" filtered_ids = self._preprocess_token_ids( token_ids, skip_special_tokens=skip_special_tokens, ) text = super().decode( filtered_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, normalize=normalize, basic_normalize=basic_normalize, remove_diacritics=remove_diacritics, **kwargs, ) if decode_with_timestamps: # legacy method to decode timestamps when not included in the tokenizer vocabulary text = self._decode_with_timestamps( filtered_ids, time_precision=time_precision, skip_special_tokens=skip_special_tokens ) else: text = self._filter_timestamp_ids(text) # retrieve offsets if output_offsets: offsets = self._compute_offsets(token_ids, time_precision=time_precision) return {"text": text, "offsets": offsets} return text def _decode( self, *args, normalize: bool = False, basic_normalize: bool = False, remove_diacritics: bool = False, **kwargs ) -> str: text = super()._decode(*args, **kwargs) if normalize: clean_text = self._normalize(text) return clean_text elif basic_normalize: clean_text = self._basic_normalize(text, remove_diacritics=remove_diacritics) return clean_text else: return text # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._normalize def _normalize(self, text): warnings.warn( "The private method `_normalize` is deprecated and will be removed in v5 of Transformers." "You can normalize an input string using the Whisper English normalizer using the `normalize` method." ) return self.normalize(text) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._basic_normalize def _basic_normalize(self, text, remove_diacritics=False): warnings.warn( "The private method `_basic_normalize` is deprecated and will be removed in v5 of Transformers." "You can normalize an input string using the Whisper basic normalizer using the `basic_normalize` method." ) return self.basic_normalize(text, remove_diacritics=remove_diacritics) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.normalize def normalize(self, text): """ Normalize a given string using the `EnglishTextNormalizer` class, which preforms commons transformation on english text. """ normalizer = EnglishTextNormalizer(self.english_spelling_normalizer) return normalizer(text) @staticmethod # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.basic_normalize def basic_normalize(text, remove_diacritics=False): """ Normalize a given string using the `BasicTextNormalizer` class, which preforms commons transformation on multilingual text. """ normalizer = BasicTextNormalizer(remove_diacritics=remove_diacritics) return normalizer(text) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) normalizer_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["normalizer_file"] ) if self.english_spelling_normalizer is not None: with open(normalizer_file, "w", encoding="utf-8") as f: f.write( json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + "\n" ) return tuple(files) + (normalizer_file,) def set_prefix_tokens(self, language: str = None, task: str = None, predict_timestamps: bool = None): """ Override the prefix tokens appended to the start of the label sequence. This method can be used standalone to update the prefix tokens as required when fine-tuning. 
Example: ```python >>> # instantiate the tokenizer and set the prefix token to Spanish >>> tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny", language="spanish") >>> # now switch the prefix token from Spanish to French >>> tokenizer.set_prefix_tokens(language="french") ``` Args: language (`str`, *optional*, defaults to `None`): The language of the transcription text. task (`str`, *optional*, defaults to `None`): Task identifier to append at the start of sequence (if any). predict_timestamps (`bool`, *optional*, defaults to `None`): Whether to omit the `<|notimestamps|>` token at the start of the sequence. """ self.language = language if language is not None else self.language self.task = task if task is not None else self.task self.predict_timestamps = predict_timestamps if predict_timestamps is not None else self.predict_timestamps prefix_token_ids = self.prefix_tokens prefixes = self.convert_ids_to_tokens(prefix_token_ids) eos = self.eos_token eos_token_id = self.eos_token_id prefix_template = " ".join([f"{token}:0" for token in prefixes]) self.backend_tokenizer.post_processor = processors.TemplateProcessing( single=f"{prefix_template} $A:0 {eos}:0", pair=f"{prefix_template} $A:0 $B:1 {eos}:1", special_tokens=[ (eos, eos_token_id), *zip(prefixes, prefix_token_ids), ], ) @property # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.prefix_tokens def prefix_tokens(self) -> List[int]: bos_token_id = self.convert_tokens_to_ids("<|startoftranscript|>") translate_token_id = self.convert_tokens_to_ids("<|translate|>") transcribe_token_id = self.convert_tokens_to_ids("<|transcribe|>") notimestamps_token_id = self.convert_tokens_to_ids("<|notimestamps|>") langs = tuple(LANGUAGES.keys()) if self.language is not None: self.language = self.language.lower() if self.language in TO_LANGUAGE_CODE: language_id = TO_LANGUAGE_CODE[self.language] elif self.language in TO_LANGUAGE_CODE.values(): language_id = self.language else: is_language_code = len(self.language) == 2 raise ValueError( f"Unsupported language: {self.language}. Language should be one of:" f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}." ) if self.task is not None: if self.task not in TASK_IDS: raise ValueError(f"Unsupported task: {self.task}. Task should be in: {TASK_IDS}") bos_sequence = [bos_token_id] if self.language is not None: bos_sequence.append(bos_token_id + 1 + langs.index(language_id)) if self.task is not None: bos_sequence.append(transcribe_token_id if self.task == "transcribe" else translate_token_id) if not self.predict_timestamps: bos_sequence.append(notimestamps_token_id) return bos_sequence # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id] # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones @property # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template def default_chat_template(self): """ A simple chat template that ignores role information and just concatenates messages with EOS tokens. """ logger.warning_once( "\nNo chat template is defined for this tokenizer - using the default template " f"for the {self.__class__.__name__} class. If the default is not appropriate for " "your model, please set `tokenizer.chat_template` to an appropriate template. " "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" ) return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}" # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_decoder_prompt_ids def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): self.set_prefix_tokens(task=task, language=language, predict_timestamps=not no_timestamps) # prefix tokens are of the form: <|startoftranscript|> <|lang_id|> <|task|> <|notimestamps|> # we don't want to force the bos token at position 1, as this is the starting token # when we generate, so we slice the prefix tokens to: <|lang_id|> <|task|> <|notimestamps|> # to get the forced tokens forced_tokens = self.prefix_tokens[1:] forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_tokens)] return forced_decoder_ids def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time_precision): return _decode_asr( self, model_outputs, return_timestamps=return_timestamps, return_language=return_language, time_precision=time_precision, ) # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer.get_prompt_ids def get_prompt_ids(self, text: str, return_tensors="np"): """Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`].""" batch_encoding = self("<|startofprev|>", " " + text.strip(), add_special_tokens=False) # Check for special tokens prompt_text_ids = batch_encoding["input_ids"][1:] special_token_id = next((x for x in prompt_text_ids if x >= self.all_special_ids[0]), None) if special_token_id is not None: token = self.convert_ids_to_tokens(special_token_id) raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.") batch_encoding.convert_to_tensors(tensor_type=return_tensors) return batch_encoding["input_ids"] @staticmethod # Copied from transformers.models.whisper.tokenization_whisper.WhisperTokenizer._strip_prompt def _strip_prompt(token_ids: List[int], prompt_token_id: int, decoder_start_token_id: int): has_prompt = 
isinstance(token_ids, list) and token_ids and token_ids[0] == prompt_token_id if has_prompt: if decoder_start_token_id in token_ids: return token_ids[token_ids.index(decoder_start_token_id) :] else: return [] return token_ids
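Below is a minimal usage sketch for the fast tokenizer defined above, added purely for illustration; it is not part of the original module. It assumes access to the `openai/whisper-tiny` checkpoint (one of the checkpoints listed in the pretrained maps earlier in this file) and only exercises the public methods shown above: configuring the prefix tokens, encoding, and decoding.

```python
from transformers import WhisperTokenizerFast

# Checkpoint name taken from the pretrained maps above; any Whisper checkpoint that
# ships a tokenizer.json should behave the same way.
tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny")

# Configure the prefix tokens explicitly; after this call the backend post-processor
# prepends <|startoftranscript|> <|en|> <|transcribe|> <|notimestamps|> and appends
# <|endoftext|> to every encoded sequence.
tokenizer.set_prefix_tokens(language="english", task="transcribe", predict_timestamps=False)

ids = tokenizer(" Hello world").input_ids
print(tokenizer.convert_ids_to_tokens(ids))

# decode() strips the special tokens again; passing output_offsets=True would also
# return per-segment timestamps when the ids contain timestamp tokens.
print(tokenizer.decode(ids, skip_special_tokens=True))

# The prefix tokens can be switched without reloading the tokenizer, e.g. for French.
tokenizer.set_prefix_tokens(language="french")
```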
transformers/src/transformers/models/whisper/tokenization_whisper_fast.py/0
{ "file_path": "transformers/src/transformers/models/whisper/tokenization_whisper_fast.py", "repo_id": "transformers", "token_count": 13837 }
356
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert XLM checkpoint.""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path): # Load checkpoint chkpt = torch.load(xlm_checkpoint_path, map_location="cpu") state_dict = chkpt["model"] # We have the base model one level deeper than the original XLM repository two_levels_state_dict = {} for k, v in state_dict.items(): if "pred_layer" in k: two_levels_state_dict[k] = v else: two_levels_state_dict["transformer." + k] = v config = chkpt["params"] config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))} vocab = chkpt["dico_word2id"] vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()} # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"] print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(two_levels_state_dict, pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(json.dumps(config, indent=2) + "\n") print(f"Save vocab file to {pytorch_vocab_dump_path}") with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f: f.write(json.dumps(vocab, indent=2) + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
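The converter above is normally run from the command line, but it can also be called programmatically. The sketch below is illustrative only: the checkpoint and output paths are placeholders, and the output directory is created up front because the script joins file names onto it without creating it.

```python
import os

from transformers.models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
    convert_xlm_checkpoint_to_pytorch,
)

# Placeholder paths: point these at a checkpoint downloaded from the original XLM
# repository and at an output directory of your choice.
xlm_checkpoint_path = "checkpoints/mlm_en_2048.pth"
pytorch_dump_folder_path = "converted/xlm-mlm-en-2048"

# The script writes <folder>/pytorch_model.bin, config.json and vocab.json directly,
# so the folder has to exist before the call.
os.makedirs(pytorch_dump_folder_path, exist_ok=True)
convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path)
```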
transformers/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1106 }
357
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XLM_ROBERTa_XL configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class XLMRobertaXLConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XLMRobertaXLModel`] or a [`TFXLMRobertaXLModel`]. It is used to instantiate a XLM_ROBERTA_XL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the XLM_ROBERTA_XL [facebook/xlm-roberta-xl](https://huggingface.co/facebook/xlm-roberta-xl) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 250880): Vocabulary size of the XLM_ROBERTA_XL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`XLMRobertaXLModel`]. hidden_size (`int`, *optional*, defaults to 2560): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 36): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 10240): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 514): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 1): The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaXLModel`] or [`TFXLMRobertaXLModel`]. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. Examples: ```python >>> from transformers import XLMRobertaXLConfig, XLMRobertaXLModel >>> # Initializing a XLM_ROBERTA_XL facebook/xlm-roberta-xl style configuration >>> configuration = XLMRobertaXLConfig() >>> # Initializing a model (with random weights) from the facebook/xlm-roberta-xl style configuration >>> model = XLMRobertaXLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xlm-roberta-xl" def __init__( self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout # Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->XLMRobertaXL class XLMRobertaXLOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
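For quick experiments it is often handy to instantiate the configuration above with deliberately small sizes instead of the released xlm-roberta-xl dimensions. The sketch below shows that pattern, plus the `inputs` property of the ONNX config; the chosen sizes are arbitrary and only meant to keep the randomly initialized model tiny.

```python
from transformers import XLMRobertaXLConfig, XLMRobertaXLModel
from transformers.models.xlm_roberta_xl.configuration_xlm_roberta_xl import XLMRobertaXLOnnxConfig

# Arbitrary, tiny hyperparameters for fast local testing (not the released model sizes).
config = XLMRobertaXLConfig(
    vocab_size=1000,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=128,
)
model = XLMRobertaXLModel(config)
print(sum(p.numel() for p in model.parameters()), "parameters")

# The ONNX config exposes the dynamic axes used when exporting the model.
onnx_config = XLMRobertaXLOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with dynamic "batch" and "sequence" axes
```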
transformers/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py/0
{ "file_path": "transformers/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py", "repo_id": "transformers", "token_count": 2941 }
358
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert YOLOS checkpoints from the original repository. URL: https://github.com/hustvl/YOLOS""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_yolos_config(yolos_name: str) -> YolosConfig: config = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: config.hidden_size = 192 config.intermediate_size = 768 config.num_hidden_layers = 12 config.num_attention_heads = 3 config.image_size = [800, 1333] config.use_mid_position_embeddings = False elif yolos_name == "yolos_s_dWr": config.hidden_size = 330 config.num_hidden_layers = 14 config.num_attention_heads = 6 config.intermediate_size = 1320 elif "yolos_s" in yolos_name: config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 6 elif "yolos_b" in yolos_name: config.image_size = [800, 1344] config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def rename_key(name: str) -> str: if "backbone" in name: name = name.replace("backbone", "vit") if "cls_token" in name: name = name.replace("cls_token", "embeddings.cls_token") if "det_token" in name: name = name.replace("det_token", "embeddings.detection_tokens") if "mid_pos_embed" in name: name = name.replace("mid_pos_embed", 
"encoder.mid_position_embeddings") if "pos_embed" in name: name = name.replace("pos_embed", "embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "blocks" in name: name = name.replace("blocks", "encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "class_embed" in name: name = name.replace("class_embed", "class_labels_classifier") if "bbox_embed" in name: name = name.replace("bbox_embed", "bbox_predictor") if "vit.norm" in name: name = name.replace("vit.norm", "vit.layernorm") return name def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict: for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[2]) dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on an image of cute cats def prepare_img() -> torch.Tensor: url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_yolos_checkpoint( yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False ): """ Copy/paste/tweak model's weights to our YOLOS structure. 
""" config = get_yolos_config(yolos_name) # load original state_dict state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] # load 🤗 model model = YolosForObjectDetection(config) model.eval() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # Check outputs on an image, prepared by YolosImageProcessor size = 800 if yolos_name != "yolos_ti" else 512 image_processor = YolosImageProcessor(format="coco_detection", size=size) encoding = image_processor(images=prepare_img(), return_tensors="pt") outputs = model(**encoding) logits, pred_boxes = outputs.logits, outputs.pred_boxes expected_slice_logits, expected_slice_boxes = None, None if yolos_name == "yolos_ti": expected_slice_logits = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) expected_slice_boxes = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": expected_slice_logits = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) expected_slice_boxes = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": expected_slice_logits = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) expected_slice_boxes = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": expected_slice_logits = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) expected_slice_boxes = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": expected_slice_logits = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) expected_slice_boxes = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}") assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4) assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_mapping = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub...") model_name = model_mapping[yolos_name] image_processor.push_to_hub(model_name, organization="hustvl") model.push_to_hub(model_name, organization="hustvl") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." 
) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
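The YOLOS converter above can also be driven programmatically instead of via argparse. The sketch below is illustrative: the checkpoint path is a placeholder for one of the original weights released at https://github.com/hustvl/YOLOS, and the hard-coded logit/box checks inside `convert_yolos_checkpoint` mean the call only succeeds with the matching official checkpoint.

```python
from transformers.models.yolos.convert_yolos_to_pytorch import convert_yolos_checkpoint

# Placeholder path to the original "yolos_s_200_pre" weights; the output folder is
# created by the function itself.
convert_yolos_checkpoint(
    yolos_name="yolos_s_200_pre",
    checkpoint_path="checkpoints/yolos_s_200_pre.pth",
    pytorch_dump_folder_path="converted/yolos-small",
    push_to_hub=False,
)
```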
transformers/src/transformers/models/yolos/convert_yolos_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/yolos/convert_yolos_to_pytorch.py", "repo_id": "transformers", "token_count": 5081 }
359
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from huggingface_hub import model_info from ..configuration_utils import PretrainedConfig from ..dynamic_module_utils import get_class_from_dynamic_module from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..image_processing_utils import BaseImageProcessor from ..models.auto.configuration_auto import AutoConfig from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor from ..models.auto.modeling_auto import AutoModelForDepthEstimation, AutoModelForImageToImage from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( CONFIG_NAME, HUGGINGFACE_CO_RESOLVE_ENDPOINT, cached_file, extract_commit_hash, find_adapter_config_file, is_kenlm_available, is_offline_mode, is_peft_available, is_pyctcdecode_available, is_tf_available, is_torch_available, logging, ) from .audio_classification import AudioClassificationPipeline from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline from .base import ( ArgumentHandler, CsvPipelineDataFormat, JsonPipelineDataFormat, PipedPipelineDataFormat, Pipeline, PipelineDataFormat, PipelineException, PipelineRegistry, get_default_model_and_revision, infer_framework_load_model, ) from .conversational import Conversation, ConversationalPipeline from .depth_estimation import DepthEstimationPipeline from .document_question_answering import DocumentQuestionAnsweringPipeline from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline from .image_classification import ImageClassificationPipeline from .image_feature_extraction import ImageFeatureExtractionPipeline from .image_segmentation import ImageSegmentationPipeline from .image_to_image import ImageToImagePipeline from .image_to_text import ImageToTextPipeline from .mask_generation import MaskGenerationPipeline from .object_detection import ObjectDetectionPipeline from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline from .text_classification import TextClassificationPipeline from .text_generation import TextGenerationPipeline from .text_to_audio import TextToAudioPipeline from .token_classification import ( AggregationStrategy, NerPipeline, TokenClassificationArgumentHandler, TokenClassificationPipeline, ) from .video_classification import VideoClassificationPipeline from .visual_question_answering import VisualQuestionAnsweringPipeline from 
.zero_shot_audio_classification import ZeroShotAudioClassificationPipeline from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline from .zero_shot_image_classification import ZeroShotImageClassificationPipeline from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForImageClassification, TFAutoModelForMaskedLM, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelForVision2Seq, TFAutoModelForZeroShotImageClassification, ) if is_torch_available(): import torch from ..models.auto.modeling_auto import ( AutoModel, AutoModelForAudioClassification, AutoModelForCausalLM, AutoModelForCTC, AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForMaskedLM, AutoModelForMaskGeneration, AutoModelForObjectDetection, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTextToSpectrogram, AutoModelForTextToWaveform, AutoModelForTokenClassification, AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelForZeroShotImageClassification, AutoModelForZeroShotObjectDetection, ) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel from ..tokenization_utils_fast import PreTrainedTokenizerFast logger = logging.get_logger(__name__) # Register all the supported tasks here TASK_ALIASES = { "sentiment-analysis": "text-classification", "ner": "token-classification", "vqa": "visual-question-answering", "text-to-speech": "text-to-audio", } SUPPORTED_TASKS = { "audio-classification": { "impl": AudioClassificationPipeline, "tf": (), "pt": (AutoModelForAudioClassification,) if is_torch_available() else (), "default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}}, "type": "audio", }, "automatic-speech-recognition": { "impl": AutomaticSpeechRecognitionPipeline, "tf": (), "pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}}, "type": "multimodal", }, "text-to-audio": { "impl": TextToAudioPipeline, "tf": (), "pt": (AutoModelForTextToWaveform, AutoModelForTextToSpectrogram) if is_torch_available() else (), "default": {"model": {"pt": ("suno/bark-small", "645cfba")}}, "type": "text", }, "feature-extraction": { "impl": FeatureExtractionPipeline, "tf": (TFAutoModel,) if is_tf_available() else (), "pt": (AutoModel,) if is_torch_available() else (), "default": { "model": { "pt": ("distilbert/distilbert-base-cased", "935ac13"), "tf": ("distilbert/distilbert-base-cased", "935ac13"), } }, "type": "multimodal", }, "text-classification": { "impl": TextClassificationPipeline, "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), "tf": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"), }, }, "type": "text", }, "token-classification": { "impl": 
TokenClassificationPipeline, "tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (), "pt": (AutoModelForTokenClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), "tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"), }, }, "type": "text", }, "question-answering": { "impl": QuestionAnsweringPipeline, "tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (), "pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (), "default": { "model": { "pt": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"), "tf": ("distilbert/distilbert-base-cased-distilled-squad", "626af31"), }, }, "type": "text", }, "table-question-answering": { "impl": TableQuestionAnsweringPipeline, "pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (), "tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (), "default": { "model": { "pt": ("google/tapas-base-finetuned-wtq", "69ceee2"), "tf": ("google/tapas-base-finetuned-wtq", "69ceee2"), }, }, "type": "text", }, "visual-question-answering": { "impl": VisualQuestionAnsweringPipeline, "pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")}, }, "type": "multimodal", }, "document-question-answering": { "impl": DocumentQuestionAnsweringPipeline, "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { "model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")}, }, "type": "multimodal", }, "fill-mask": { "impl": FillMaskPipeline, "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (), "pt": (AutoModelForMaskedLM,) if is_torch_available() else (), "default": { "model": { "pt": ("distilbert/distilroberta-base", "ec58a5b"), "tf": ("distilbert/distilroberta-base", "ec58a5b"), } }, "type": "text", }, "summarization": { "impl": SummarizationPipeline, "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), "default": { "model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("google-t5/t5-small", "d769bba")} }, "type": "text", }, # This task is a special case as it's parametrized by SRC, TGT languages. 
"translation": { "impl": TranslationPipeline, "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), "default": { ("en", "fr"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, ("en", "de"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, ("en", "ro"): {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, }, "type": "text", }, "text2text-generation": { "impl": Text2TextGenerationPipeline, "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (), "default": {"model": {"pt": ("google-t5/t5-base", "686f1db"), "tf": ("google-t5/t5-base", "686f1db")}}, "type": "text", }, "text-generation": { "impl": TextGenerationPipeline, "tf": (TFAutoModelForCausalLM,) if is_tf_available() else (), "pt": (AutoModelForCausalLM,) if is_torch_available() else (), "default": {"model": {"pt": ("openai-community/gpt2", "6c0e608"), "tf": ("openai-community/gpt2", "6c0e608")}}, "type": "text", }, "zero-shot-classification": { "impl": ZeroShotClassificationPipeline, "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (), "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("FacebookAI/roberta-large-mnli", "130fb28"), }, "config": { "pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("FacebookAI/roberta-large-mnli", "130fb28"), }, }, "type": "text", }, "zero-shot-image-classification": { "impl": ZeroShotImageClassificationPipeline, "tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (), "pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("openai/clip-vit-base-patch32", "f4881ba"), "tf": ("openai/clip-vit-base-patch32", "f4881ba"), } }, "type": "multimodal", }, "zero-shot-audio-classification": { "impl": ZeroShotAudioClassificationPipeline, "tf": (), "pt": (AutoModel,) if is_torch_available() else (), "default": { "model": { "pt": ("laion/clap-htsat-fused", "973b6e5"), } }, "type": "multimodal", }, "conversational": { "impl": ConversationalPipeline, "tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (), "pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (), "default": { "model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")} }, "type": "text", }, "image-classification": { "impl": ImageClassificationPipeline, "tf": (TFAutoModelForImageClassification,) if is_tf_available() else (), "pt": (AutoModelForImageClassification,) if is_torch_available() else (), "default": { "model": { "pt": ("google/vit-base-patch16-224", "5dca96d"), "tf": ("google/vit-base-patch16-224", "5dca96d"), } }, "type": "image", }, "image-feature-extraction": { "impl": ImageFeatureExtractionPipeline, "tf": (TFAutoModel,) if is_tf_available() else (), "pt": (AutoModel,) if is_torch_available() else (), "default": { "model": { "pt": ("google/vit-base-patch16-224", "29e7a1e183"), "tf": ("google/vit-base-patch16-224", "29e7a1e183"), } }, "type": "image", }, "image-segmentation": { "impl": ImageSegmentationPipeline, "tf": (), "pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (), "default": {"model": {"pt": 
("facebook/detr-resnet-50-panoptic", "fc15262")}}, "type": "multimodal", }, "image-to-text": { "impl": ImageToTextPipeline, "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (), "pt": (AutoModelForVision2Seq,) if is_torch_available() else (), "default": { "model": { "pt": ("ydshieh/vit-gpt2-coco-en", "65636df"), "tf": ("ydshieh/vit-gpt2-coco-en", "65636df"), } }, "type": "multimodal", }, "object-detection": { "impl": ObjectDetectionPipeline, "tf": (), "pt": (AutoModelForObjectDetection,) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}}, "type": "multimodal", }, "zero-shot-object-detection": { "impl": ZeroShotObjectDetectionPipeline, "tf": (), "pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (), "default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}}, "type": "multimodal", }, "depth-estimation": { "impl": DepthEstimationPipeline, "tf": (), "pt": (AutoModelForDepthEstimation,) if is_torch_available() else (), "default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}}, "type": "image", }, "video-classification": { "impl": VideoClassificationPipeline, "tf": (), "pt": (AutoModelForVideoClassification,) if is_torch_available() else (), "default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}}, "type": "video", }, "mask-generation": { "impl": MaskGenerationPipeline, "tf": (), "pt": (AutoModelForMaskGeneration,) if is_torch_available() else (), "default": {"model": {"pt": ("facebook/sam-vit-huge", "997b15")}}, "type": "multimodal", }, "image-to-image": { "impl": ImageToImagePipeline, "tf": (), "pt": (AutoModelForImageToImage,) if is_torch_available() else (), "default": {"model": {"pt": ("caidas/swin2SR-classical-sr-x2-64", "4aaedcb")}}, "type": "image", }, } NO_FEATURE_EXTRACTOR_TASKS = set() NO_IMAGE_PROCESSOR_TASKS = set() NO_TOKENIZER_TASKS = set() # Those model configs are special, they are generic over their task, meaning # any tokenizer/feature_extractor might be use for a given model so we cannot # use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to # see if the model defines such objects or not. MULTI_MODEL_AUDIO_CONFIGS = {"SpeechEncoderDecoderConfig"} MULTI_MODEL_VISION_CONFIGS = {"VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"} for task, values in SUPPORTED_TASKS.items(): if values["type"] == "text": NO_FEATURE_EXTRACTOR_TASKS.add(task) NO_IMAGE_PROCESSOR_TASKS.add(task) elif values["type"] in {"image", "video"}: NO_TOKENIZER_TASKS.add(task) elif values["type"] in {"audio"}: NO_TOKENIZER_TASKS.add(task) NO_IMAGE_PROCESSOR_TASKS.add(task) elif values["type"] != "multimodal": raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}") PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES) def get_supported_tasks() -> List[str]: """ Returns a list of supported task strings. """ return PIPELINE_REGISTRY.get_supported_tasks() def get_task(model: str, token: Optional[str] = None, **deprecated_kwargs) -> str: use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.") token = use_auth_token if is_offline_mode(): raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode") try: info = model_info(model, token=token) except Exception as e: raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}") if not info.pipeline_tag: raise RuntimeError( f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically" ) if getattr(info, "library_name", "transformers") != "transformers": raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers") task = info.pipeline_tag return task def check_task(task: str) -> Tuple[str, Dict, Any]: """ Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and default models if they exist. Args: task (`str`): The task defining which pipeline will be returned. Currently accepted tasks are: - `"audio-classification"` - `"automatic-speech-recognition"` - `"conversational"` - `"depth-estimation"` - `"document-question-answering"` - `"feature-extraction"` - `"fill-mask"` - `"image-classification"` - `"image-feature-extraction"` - `"image-segmentation"` - `"image-to-text"` - `"image-to-image"` - `"object-detection"` - `"question-answering"` - `"summarization"` - `"table-question-answering"` - `"text2text-generation"` - `"text-classification"` (alias `"sentiment-analysis"` available) - `"text-generation"` - `"text-to-audio"` (alias `"text-to-speech"` available) - `"token-classification"` (alias `"ner"` available) - `"translation"` - `"translation_xx_to_yy"` - `"video-classification"` - `"visual-question-answering"` (alias `"vqa"` available) - `"zero-shot-classification"` - `"zero-shot-image-classification"` - `"zero-shot-object-detection"` Returns: (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name (removed alias and options). 
The actual dictionary required to initialize the pipeline and some extra task options for parametrized tasks like "translation_XX_to_YY" """ return PIPELINE_REGISTRY.check_task(task) def clean_custom_task(task_info): import transformers if "impl" not in task_info: raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.") pt_class_names = task_info.get("pt", ()) if isinstance(pt_class_names, str): pt_class_names = [pt_class_names] task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names) tf_class_names = task_info.get("tf", ()) if isinstance(tf_class_names, str): tf_class_names = [tf_class_names] task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names) return task_info, None def pipeline( task: str = None, model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool] = None, model_kwargs: Dict[str, Any] = None, pipeline_class: Optional[Any] = None, **kwargs, ) -> Pipeline: """ Utility factory method to build a [`Pipeline`]. Pipelines are made of: - A [tokenizer](tokenizer) in charge of mapping raw textual input to token. - A [model](model) to make predictions from the inputs. - Some (optional) post processing for enhancing model's output. Args: task (`str`): The task defining which pipeline will be returned. Currently accepted tasks are: - `"audio-classification"`: will return a [`AudioClassificationPipeline`]. - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`]. - `"conversational"`: will return a [`ConversationalPipeline`]. - `"depth-estimation"`: will return a [`DepthEstimationPipeline`]. - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`]. - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. - `"fill-mask"`: will return a [`FillMaskPipeline`]:. - `"image-classification"`: will return a [`ImageClassificationPipeline`]. - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`]. - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`]. - `"image-to-image"`: will return a [`ImageToImagePipeline`]. - `"image-to-text"`: will return a [`ImageToTextPipeline`]. - `"mask-generation"`: will return a [`MaskGenerationPipeline`]. - `"object-detection"`: will return a [`ObjectDetectionPipeline`]. - `"question-answering"`: will return a [`QuestionAnsweringPipeline`]. - `"summarization"`: will return a [`SummarizationPipeline`]. - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`]. - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`]. - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationPipeline`]. - `"text-generation"`: will return a [`TextGenerationPipeline`]:. - `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:. - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`]. 
- `"translation"`: will return a [`TranslationPipeline`]. - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`]. - `"video-classification"`: will return a [`VideoClassificationPipeline`]. - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`]. - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`]. - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`]. - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`]. - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`]. model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*): The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or [`TFPreTrainedModel`] (for TensorFlow). If not provided, the default for the `task` will be loaded. config (`str` or [`PretrainedConfig`], *optional*): The configuration that will be used by the pipeline to instantiate the model. This can be a model identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`]. If not provided, the default configuration file for the requested model will be used. That means that if `model` is given, its default configuration will be used. However, if `model` is not supplied, this `task`'s default model's config is used instead. tokenizer (`str` or [`PreTrainedTokenizer`], *optional*): The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`]. If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string). However, if `config` is also not given or not a string, then the default tokenizer for the given `task` will be loaded. feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*): The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`]. Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed. If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it is a string). However, if `config` is also not given or not a string, then the default feature extractor for the given `task` will be loaded. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. revision (`str`, *optional*, defaults to `"main"`): When passing a task name or a string model identifier: The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. use_fast (`bool`, *optional*, defaults to `True`): Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). device (`int` or `str` or `torch.device`): Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this pipeline will be allocated. device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set `device_map="auto"` to compute the most optimized `device_map` automatically (see [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload) for more information). <Tip warning={true}> Do not use `device_map` AND `device` at the same time as they will conflict </Tip> torch_dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, tokenization or even pipeline files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. model_kwargs (`Dict[str, Any]`, *optional*): Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values). Returns: [`Pipeline`]: A suitable pipeline for the task. Examples: ```python >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer >>> # Sentiment analysis pipeline >>> analyzer = pipeline("sentiment-analysis") >>> # Question answering pipeline, specifying the checkpoint identifier >>> oracle = pipeline( ... "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased" ... ) >>> # Named entity recognition pipeline, passing in a specific model and tokenizer >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer) ```""" if model_kwargs is None: model_kwargs = {} # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, # this is to keep BC). use_auth_token = model_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.") token = use_auth_token code_revision = kwargs.pop("code_revision", None) commit_hash = kwargs.pop("_commit_hash", None) hub_kwargs = { "revision": revision, "token": token, "trust_remote_code": trust_remote_code, "_commit_hash": commit_hash, } if task is None and model is None: raise RuntimeError( "Impossible to instantiate a pipeline without either a task or a model " "being specified. " "Please provide a task class or a model" ) if model is None and tokenizer is not None: raise RuntimeError( "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer" " may not be compatible with the default model. Please provide a PreTrainedModel class or a" " path/identifier to a pretrained model when providing tokenizer." ) if model is None and feature_extractor is not None: raise RuntimeError( "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided" " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class" " or a path/identifier to a pretrained model when providing feature_extractor." ) if isinstance(model, Path): model = str(model) if commit_hash is None: pretrained_model_name_or_path = None if isinstance(config, str): pretrained_model_name_or_path = config elif config is None and isinstance(model, str): pretrained_model_name_or_path = model if not isinstance(config, PretrainedConfig) and pretrained_model_name_or_path is not None: # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible resolved_config_file = cached_file( pretrained_model_name_or_path, CONFIG_NAME, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, **hub_kwargs, ) hub_kwargs["_commit_hash"] = extract_commit_hash(resolved_config_file, commit_hash) else: hub_kwargs["_commit_hash"] = getattr(config, "_commit_hash", None) # Config is the primordial information item. # Instantiate config if needed if isinstance(config, str): config = AutoConfig.from_pretrained( config, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs ) hub_kwargs["_commit_hash"] = config._commit_hash elif config is None and isinstance(model, str): # Check for an adapter file in the model path if PEFT is available if is_peft_available(): # `find_adapter_config_file` doesn't accept `trust_remote_code` _hub_kwargs = {k: v for k, v in hub_kwargs.items() if k != "trust_remote_code"} maybe_adapter_path = find_adapter_config_file( model, token=hub_kwargs["token"], revision=hub_kwargs["revision"], _commit_hash=hub_kwargs["_commit_hash"], ) if maybe_adapter_path is not None: with open(maybe_adapter_path, "r", encoding="utf-8") as f: adapter_config = json.load(f) model = adapter_config["base_model_name_or_path"] config = AutoConfig.from_pretrained( model, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs ) hub_kwargs["_commit_hash"] = config._commit_hash custom_tasks = {} if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: custom_tasks = config.custom_pipelines if task is None and trust_remote_code is not False: if len(custom_tasks) == 1: task = list(custom_tasks.keys())[0] else: raise RuntimeError( "We can't infer the task automatically for this model as there are multiple tasks available. 
Pick " f"one in {', '.join(custom_tasks.keys())}" ) if task is None and model is not None: if not isinstance(model, str): raise RuntimeError( "Inferring the task automatically requires to check the hub with a model_id defined as a `str`. " f"{model} is not a valid model_id." ) task = get_task(model, token) # Retrieve the task if task in custom_tasks: normalized_task = task targeted_task, task_options = clean_custom_task(custom_tasks[task]) if pipeline_class is None: if not trust_remote_code: raise ValueError( "Loading this pipeline requires you to execute the code in the pipeline file in that" " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" " set the option `trust_remote_code=True` to remove this error." ) class_ref = targeted_task["impl"] pipeline_class = get_class_from_dynamic_module( class_ref, model, code_revision=code_revision, **hub_kwargs, ) else: normalized_task, targeted_task, task_options = check_task(task) if pipeline_class is None: pipeline_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: # At that point framework might still be undetermined model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options) revision = revision if revision is not None else default_revision logger.warning( f"No model was supplied, defaulted to {model} and revision" f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n" "Using a pipeline without specifying a model name and revision in production is not recommended." ) if config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) hub_kwargs["_commit_hash"] = config._commit_hash if device_map is not None: if "device_map" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those' " arguments might conflict, use only one.)" ) if device is not None: logger.warning( "Both `device` and `device_map` are specified. `device` will override `device_map`. You" " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`." ) model_kwargs["device_map"] = device_map if torch_dtype is not None: if "torch_dtype" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... 
torch_dtype=..., model_kwargs={"torch_dtype":...})` as those' " arguments might conflict, use only one.)" ) if isinstance(torch_dtype, str) and hasattr(torch, torch_dtype): torch_dtype = getattr(torch, torch_dtype) model_kwargs["torch_dtype"] = torch_dtype model_name = model if isinstance(model, str) else None # Load the correct model if possible # Infer the framework from the model if not already defined if isinstance(model, str) or framework is None: model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]} framework, model = infer_framework_load_model( model, model_classes=model_classes, config=config, framework=framework, task=task, **hub_kwargs, **model_kwargs, ) model_config = model.config hub_kwargs["_commit_hash"] = model.config._commit_hash load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None # If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`. # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue. # This block is only temporarily to make CI green. if load_image_processor and load_feature_extractor: load_feature_extractor = False if ( tokenizer is None and not load_tokenizer and normalized_task not in NO_TOKENIZER_TASKS # Using class name to avoid importing the real class. and ( model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS or model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS ) ): # This is a special category of models, that are fusions of multiple models # so the model_config might not define a tokenizer, but it seems to be # necessary for the task, so we're force-trying to load it. load_tokenizer = True if ( image_processor is None and not load_image_processor and normalized_task not in NO_IMAGE_PROCESSOR_TASKS # Using class name to avoid importing the real class. and model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS ): # This is a special category of models, that are fusions of multiple models # so the model_config might not define a tokenizer, but it seems to be # necessary for the task, so we're force-trying to load it. load_image_processor = True if ( feature_extractor is None and not load_feature_extractor and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS # Using class name to avoid importing the real class. and model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS ): # This is a special category of models, that are fusions of multiple models # so the model_config might not define a tokenizer, but it seems to be # necessary for the task, so we're force-trying to load it. load_feature_extractor = True if task in NO_TOKENIZER_TASKS: # These will never require a tokenizer. # the model on the other hand might have a tokenizer, but # the files could be missing from the hub, instead of failing # on such repos, we just force to not load it. 
load_tokenizer = False if task in NO_FEATURE_EXTRACTOR_TASKS: load_feature_extractor = False if task in NO_IMAGE_PROCESSOR_TASKS: load_image_processor = False if load_tokenizer: # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model_name, str): tokenizer = model_name elif isinstance(config, str): tokenizer = config else: # Impossible to guess what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) use_fast = tokenizer[1].pop("use_fast", use_fast) tokenizer_identifier = tokenizer[0] tokenizer_kwargs = tokenizer[1] else: tokenizer_identifier = tokenizer tokenizer_kwargs = model_kwargs.copy() tokenizer_kwargs.pop("torch_dtype", None) tokenizer = AutoTokenizer.from_pretrained( tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs ) if load_image_processor: # Try to infer image processor from model or config name (if provided as str) if image_processor is None: if isinstance(model_name, str): image_processor = model_name elif isinstance(config, str): image_processor = config # Backward compatibility, as `feature_extractor` used to be the name # for `ImageProcessor`. elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor): image_processor = feature_extractor else: # Impossible to guess what is the right image_processor here raise Exception( "Impossible to guess which image processor to use. " "Please provide a PreTrainedImageProcessor class or a path/identifier " "to a pretrained image processor." ) # Instantiate image_processor if needed if isinstance(image_processor, (str, tuple)): image_processor = AutoImageProcessor.from_pretrained( image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs ) if load_feature_extractor: # Try to infer feature extractor from model or config name (if provided as str) if feature_extractor is None: if isinstance(model_name, str): feature_extractor = model_name elif isinstance(config, str): feature_extractor = config else: # Impossible to guess what is the right feature_extractor here raise Exception( "Impossible to guess which feature extractor to use. " "Please provide a PreTrainedFeatureExtractor class or a path/identifier " "to a pretrained feature extractor." 
) # Instantiate feature_extractor if needed if isinstance(feature_extractor, (str, tuple)): feature_extractor = AutoFeatureExtractor.from_pretrained( feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs ) if ( feature_extractor._processor_class and feature_extractor._processor_class.endswith("WithLM") and isinstance(model_name, str) ): try: import kenlm # to trigger `ImportError` if not installed from pyctcdecode import BeamSearchDecoderCTC if os.path.isdir(model_name) or os.path.isfile(model_name): decoder = BeamSearchDecoderCTC.load_from_dir(model_name) else: language_model_glob = os.path.join( BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*" ) alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME allow_patterns = [language_model_glob, alphabet_filename] decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns) kwargs["decoder"] = decoder except ImportError as e: logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}") if not is_kenlm_available(): logger.warning("Try to install `kenlm`: `pip install kenlm") if not is_pyctcdecode_available(): logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode") if task == "translation" and model.config.task_specific_params: for key in model.config.task_specific_params: if key.startswith("translation"): task = key warnings.warn( f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', UserWarning, ) break if tokenizer is not None: kwargs["tokenizer"] = tokenizer if feature_extractor is not None: kwargs["feature_extractor"] = feature_extractor if torch_dtype is not None: kwargs["torch_dtype"] = torch_dtype if image_processor is not None: kwargs["image_processor"] = image_processor if device is not None: kwargs["device"] = device return pipeline_class(model=model, framework=framework, task=task, **kwargs)
transformers/src/transformers/pipelines/__init__.py/0
{ "file_path": "transformers/src/transformers/pipelines/__init__.py", "repo_id": "transformers", "token_count": 21563 }
360
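Taken together, the `SUPPORTED_TASKS` registry and the `pipeline()` factory above mean that omitting `model` makes the factory fall back to the task's pinned default checkpoint and revision. A minimal usage sketch follows; the checkpoint names are the ones pinned in the registry above, while the input sentence, labels, and printed output are illustrative only and not part of the file:

```python
from transformers import pipeline

# No model given: "text-generation" falls back to the registry default
# ("openai-community/gpt2" at revision "6c0e608") and logs a warning
# recommending an explicit model/revision in production.
generator = pipeline("text-generation")

# Explicit model plus device selection; device accepts "cpu", "cuda:1",
# a torch.device, or a GPU ordinal, as documented in the docstring above.
classifier = pipeline(
    "zero-shot-classification",
    model="facebook/bart-large-mnli",
    device="cpu",
)
print(classifier(
    "This is a course about the Transformers library",
    candidate_labels=["education", "politics"],
))
```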
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import ( MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, ) logger = logging.get_logger(__name__) Prediction = Dict[str, Any] Predictions = List[Prediction] @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class ObjectDetectionPipeline(Pipeline): """ Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects and their classes. Example: ```python >>> from transformers import pipeline >>> detector = pipeline(model="facebook/detr-resnet-50") >>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}] >>> # x, y are expressed relative to the top left hand corner. ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"object-detection"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy() mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES) self.check_model_type(mapping) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] postprocess_kwargs = {} if "threshold" in kwargs: postprocess_kwargs["threshold"] = kwargs["threshold"] return preprocess_params, {}, postprocess_kwargs def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]: """ Detect objects (bounding boxes & classes) in the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. threshold (`float`, *optional*, defaults to 0.9): The probability necessary to make a prediction. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. The dictionaries contain the following keys: - **label** (`str`) -- The class label identified by the model. 
- **score** (`float`) -- The score attributed by the model for that label. - **box** (`List[Dict[str, int]]`) -- The bounding box of detected object in image's original size. """ return super().__call__(*args, **kwargs) def preprocess(self, image, timeout=None): image = load_image(image, timeout=timeout) target_size = torch.IntTensor([[image.height, image.width]]) inputs = self.image_processor(images=[image], return_tensors="pt") if self.tokenizer is not None: inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt") inputs["target_size"] = target_size return inputs def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") outputs = self.model(**model_inputs) model_outputs = outputs.__class__({"target_size": target_size, **outputs}) if self.tokenizer is not None: model_outputs["bbox"] = model_inputs["bbox"] return model_outputs def postprocess(self, model_outputs, threshold=0.9): target_size = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. height, width = target_size[0].tolist() def unnormalize(bbox): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1) labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()] boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)] keys = ["score", "label", "box"] annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size) raw_annotation = raw_annotations[0] scores = raw_annotation["scores"] labels = raw_annotation["labels"] boxes = raw_annotation["boxes"] raw_annotation["scores"] = scores.tolist() raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels] raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] keys = ["score", "label", "box"] annotation = [ dict(zip(keys, vals)) for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"]) ] return annotation def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: """ Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } Args: box (`torch.Tensor`): Tensor containing the coordinates in corners format. Returns: bbox (`Dict[str, int]`): Dict containing the coordinates in corners format. """ if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.") xmin, ymin, xmax, ymax = box.int().tolist() bbox = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
transformers/src/transformers/pipelines/object_detection.py/0
{ "file_path": "transformers/src/transformers/pipelines/object_detection.py", "repo_id": "transformers", "token_count": 3401 }
361
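As a quick illustration of the pipeline documented above, the sketch below mirrors the docstring example and shows where the `threshold` and `timeout` arguments handled by `_sanitize_parameters` end up; the URL comes from the docstring, the numeric values are illustrative:

```python
from transformers import pipeline

# "facebook/detr-resnet-50" is the default checkpoint pinned for
# "object-detection" in SUPPORTED_TASKS; passing it explicitly pins the model.
detector = pipeline("object-detection", model="facebook/detr-resnet-50")

url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
# `threshold` is forwarded to postprocess(), `timeout` to preprocess().
predictions = detector(url, threshold=0.5, timeout=30.0)

for pred in predictions:
    box = pred["box"]  # {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}
    print(f'{pred["label"]}: {pred["score"]:.3f} at {box}')
```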
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Set, Tuple, Union import torch from packaging import version from safetensors.torch import storage_ptr, storage_size from torch import nn from .utils import is_torch_xla_available, logging ALL_LAYERNORM_LAYERS = [nn.LayerNorm] logger = logging.get_logger(__name__) parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) is_torch_greater_or_equal_than_2_2 = parsed_torch_version_base >= version.parse("2.2") is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1") is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0") is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13") is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") def softmax_backward_data(parent, grad_output, output, dim, self): """ A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according to the torch version detected. """ from torch import _softmax_backward_data return _softmax_backward_data(grad_output, output, parent.dim, self.dtype) def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (`torch.nn.Linear`): The layer to prune. index (`torch.LongTensor`): The indices to keep in the layer. dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices. Returns: `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class Conv1D(nn.Module): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (`int`): The number of output features. nx (`int`): The number of input features. 
""" def __init__(self, nf, nx): super().__init__() self.nf = nf self.weight = nn.Parameter(torch.empty(nx, nf)) self.bias = nn.Parameter(torch.zeros(nf)) nn.init.normal_(self.weight, std=0.02) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(size_out) return x def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: """ Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. Used to remove heads. Args: layer ([`~pytorch_utils.Conv1D`]): The layer to prune. index (`torch.LongTensor`): The indices to keep in the layer. dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices. Returns: [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_layer( layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None ) -> Union[nn.Linear, Conv1D]: """ Prune a Conv1D or linear layer to keep only entries in index. Used to remove heads. Args: layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune. index (`torch.LongTensor`): The indices to keep in the layer. dim (`int`, *optional*): The dimension on which to keep the indices. Returns: `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`. """ if isinstance(layer, nn.Linear): return prune_linear_layer(layer, index, dim=0 if dim is None else dim) elif isinstance(layer, Conv1D): return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) else: raise ValueError(f"Can't prune layer of class {layer.__class__}") def apply_chunking_to_forward( forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors ) -> torch.Tensor: """ This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory. If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly applying `forward_fn` to `input_tensors`. Args: forward_fn (`Callable[..., torch.Tensor]`): The forward function of the model. chunk_size (`int`): The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`. chunk_dim (`int`): The dimension over which the `input_tensors` should be chunked. input_tensors (`Tuple[torch.Tensor]`): The input tensors of `forward_fn` which will be chunked Returns: `torch.Tensor`: A tensor with the same shape as the `forward_fn` would have given if applied`. 
Examples: ```python # rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) ```""" assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors" # inspect.signature exist since python 3.5 and is a python method -> no problem with backward compatibility num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) if num_args_in_forward_chunk_fn != len(input_tensors): raise ValueError( f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input " "tensors are given" ) if chunk_size > 0: tensor_shape = input_tensors[0].shape[chunk_dim] for input_tensor in input_tensors: if input_tensor.shape[chunk_dim] != tensor_shape: raise ValueError( f"All input tenors have to be of the same shape: {tensor_shape}, " f"found shape {input_tensor.shape[chunk_dim]}" ) if input_tensors[0].shape[chunk_dim] % chunk_size != 0: raise ValueError( f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk " f"size {chunk_size}" ) num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size # chunk input tensor into tuples input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors) # apply forward fn to every tuple output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)) # concatenate output at same dimension return torch.cat(output_chunks, dim=chunk_dim) return forward_fn(*input_tensors) def find_pruneable_heads_and_indices( heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] ) -> Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking `already_pruned_heads` into account. Args: heads (`List[int]`): List of the indices of heads to prune. n_heads (`int`): The number of heads in the model. head_size (`int`): The size of each head. already_pruned_heads (`Set[int]`): A set of already pruned heads. Returns: `Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads` into account and the indices of rows/columns to keep in the layer weight. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: torch.LongTensor = torch.arange(len(mask))[mask].long() return heads, index def meshgrid( *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None ) -> Tuple[torch.Tensor, ...]: """ Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument. Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html """ return torch.meshgrid(*tensors, indexing=indexing) def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. 
This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. """ if tensor.device.type == "xla" and is_torch_xla_available(): # NOTE: xla tensors don't have storage # use some other unique id to distinguish. # this is an XLA tensor, it must be created using torch_xla's # device. So the following import is safe: import torch_xla unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) else: unique_id = storage_ptr(tensor) return tensor.device, unique_id, storage_size(tensor)
transformers/src/transformers/pytorch_utils.py/0
{ "file_path": "transformers/src/transformers/pytorch_utils.py", "repo_id": "transformers", "token_count": 4640 }
362
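The `apply_chunking_to_forward` docstring above describes the chunking contract in prose; the following self-contained sketch shows the same pattern end to end. The module path matches the file above (`transformers.pytorch_utils`), but the class name and sizes are illustrative, not taken from the library:

```python
import torch
from torch import nn
from transformers.pytorch_utils import apply_chunking_to_forward


class TinyFeedForward(nn.Module):
    def __init__(self, hidden=16, chunk_size=2, seq_len_dim=1):
        super().__init__()
        self.dense = nn.Linear(hidden, hidden)
        self.chunk_size_feed_forward = chunk_size
        self.seq_len_dim = seq_len_dim

    def forward_chunk(self, hidden_states):
        return self.dense(hidden_states)

    def forward(self, hidden_states):
        # Split the sequence dimension into chunks of size 2, run
        # forward_chunk on each chunk independently, then concatenate.
        return apply_chunking_to_forward(
            self.forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, hidden_states
        )


layer = TinyFeedForward()
x = torch.randn(1, 8, 16)   # (batch, seq_len, hidden); seq_len divisible by chunk_size
out = layer(x)
print(out.shape)            # torch.Size([1, 8, 16])
```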
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Union import numpy as np import tensorflow as tf from .feature_extraction_utils import BatchFeature from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of. Returns: `List[int]`: The shape of the tensor as a list. """ if isinstance(tensor, np.ndarray): return list(tensor.shape) dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor: """ Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will be removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that `softmax(x) = softmax(x + c)` (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html). Args: logits (`tf.Tensor`): Must be one of the following types: half, float32, float64. axis (`int`, *optional*): The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name (`str`, *optional*): A name for the operation. Returns: `tf.Tensor`: A Tensor. Has the same type and shape as logits. """ # TODO: When the issue linked above gets sorted, add a check on TF version here and use the original function if # it has the fix. After we drop the support for unfixed versions, remove this function. return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name) def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.") # Get mean and variance on the axis to be normalized mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis shape = [1] * inputs.shape.rank shape[axis] = shape_list(inputs)[axis] weight = tf.reshape(weight, shape) bias = tf.reshape(bias, shape) # Compute layer normalization using the batch_normalization # function. 
outputs = tf.nn.batch_normalization( inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, ) return outputs def flatten(input, start_dim=0, end_dim=-1): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input in_shape = tf.shape(input) flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1]) out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0) return tf.reshape(input, out_shape) def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor: """ Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (`torch.Tensor`): An attention mask. Returns: `tf.Tensor`: The inverted attention mask. """ if not isinstance(encoder_attention_mask, tf.Tensor): encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) encoder_extended_attention_mask = ( tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None: """ `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning zeros instead. This function adds a check against that dangerous silent behavior. Args: tensor (`tf.Tensor`): The tensor of indices to check. embed_dim (`int`): The embedding dimension. tensor_name (`str`, *optional*): The name of the tensor to use in the error message. """ tf.debugging.assert_less( tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding " f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ), ) def save_attributes_to_hdf5_group(group, name, data): """Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved. Copied from Keras to Transformers to avoid versioning issues. """ HDF5_OBJECT_HEADER_LIMIT = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. 
if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data def load_attributes_from_hdf5_group(group, name): """Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data. Copied from Keras to Transformers to avoid versioning issues. """ if name in group.attrs: data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]] else: data = [] chunk_id = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def expand_1d(data): """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s. Copied from Keras to here to avoid versioning issues.""" def _expand_single_1d_tensor(t): if isinstance(t, tf.Tensor) and t.shape.rank == 1: return tf.expand_dims(t, axis=-1) return t return tf.nest.map_structure(_expand_single_1d_tensor, data) def convert_batch_encoding(*args, **kwargs): # Convert HF BatchEncoding/BatchFeature objects in the inputs to dicts that Keras understands if args and isinstance(args[0], (BatchEncoding, BatchFeature)): args = list(args) args[0] = dict(args[0]) elif "x" in kwargs and isinstance(kwargs["x"], (BatchEncoding, BatchFeature)): kwargs["x"] = dict(kwargs["x"]) return args, kwargs
transformers/src/transformers/tf_utils.py/0
{ "file_path": "transformers/src/transformers/tf_utils.py", "repo_id": "transformers", "token_count": 3894 }
363
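A short sketch of how the helpers above behave on eager tensors; `transformers.tf_utils` is the module defined in this file, and the printed values are illustrative:

```python
import numpy as np
import tensorflow as tf
from transformers.tf_utils import flatten, shape_list, stable_softmax

x = tf.random.uniform((2, 3, 4))

# shape_list mixes static and dynamic dimensions into a plain Python list.
print(shape_list(x))                                   # [2, 3, 4]

# stable_softmax matches tf.nn.softmax while staying XLA-safe on CPU.
probs = stable_softmax(tf.constant([[1.0, 2.0, 3.0]]), axis=-1)
print(np.round(probs.numpy(), 3))

# flatten replicates torch.flatten: collapse dims 1..2 into one.
print(shape_list(flatten(x, start_dim=1, end_dim=2)))  # [2, 12]
```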
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ImageProcessingMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageFeatureExtractionMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BlipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BridgeTowerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, 
["vision"]) class DonutImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class IdeficsImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Mask2FormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["vision"]) class MaskFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class NougatImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OneFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Owlv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Pix2StructImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PvtImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SamImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegGptImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SiglipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SuperPointImageProcessor(metaclass=DummyObject): _backends = 
["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Swin2SRImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvpImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTHybridImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VitMatteImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VivitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"])
transformers/src/transformers/utils/dummy_vision_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_vision_objects.py", "repo_id": "transformers", "token_count": 5686 }
364
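All of the dummy classes in the file above follow one pattern: a `DummyObject` metaclass plus a `requires_backends` call, so that importing or instantiating a vision class without the "vision" backend (Pillow) installed fails with an informative ImportError rather than a NameError. The following is a minimal, self-contained sketch of how such a dummy-object pattern can be built; it is an illustration only, not the actual transformers implementation, and the `MissingBackendError` name and hard-coded availability table are invented for the example.

# Minimal sketch of a dummy-object pattern like the one above; illustrative only,
# not the actual `DummyObject`/`requires_backends` implementation in transformers.
class MissingBackendError(ImportError):  # invented name for this example
    pass

def requires_backends(obj, backends):
    # Pretend availability table: assume the "vision" backend (Pillow) is absent.
    available = {"vision": False}
    missing = [b for b in backends if not available.get(b, False)]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise MissingBackendError(f"{name} requires the {missing} backend(s) to be installed.")

class DummyObject(type):
    # Any non-underscore attribute access on the class re-checks the backends.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls.__name__, cls._backends)

class BeitImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])

try:
    BeitImageProcessor()  # raises an informative ImportError instead of a NameError
except ImportError as err:
    print(err)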
{ "modelname": "BrandNewBERT", "uppercase_modelname": "BRAND_NEW_BERT", "lowercase_modelname": "brand_new_bert", "camelcase_modelname": "BrandNewBert", "has_slow_class": ["True", "False"], "has_fast_class": ["True", "False"], "slow_tokenizer_use_sentencepiece": ["True", "False"], "authors": "The HuggingFace Team" }
transformers/templates/adding_a_missing_tokenization_test/cookiecutter.json/0
{ "file_path": "transformers/templates/adding_a_missing_tokenization_test/cookiecutter.json", "repo_id": "transformers", "token_count": 129 }
365
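The JSON above is a cookiecutter context file: string-valued keys become free-text prompts and list-valued keys become multiple-choice prompts when the template is rendered. Below is a minimal sketch of rendering such a template non-interactively with the cookiecutter package; the local template path is hypothetical and the answers simply mirror the defaults in the context file.

# Illustrative sketch only; the template path is hypothetical.
from cookiecutter.main import cookiecutter

cookiecutter(
    "./templates/adding_a_missing_tokenization_test",  # hypothetical checkout location
    no_input=True,  # do not prompt; use extra_context / defaults instead
    extra_context={
        "modelname": "BrandNewBERT",
        "uppercase_modelname": "BRAND_NEW_BERT",
        "lowercase_modelname": "brand_new_bert",
        "camelcase_modelname": "BrandNewBert",
        "has_slow_class": "True",  # list-valued fields are answered with one of their choices
        "has_fast_class": "True",
        "slow_tokenizer_use_sentencepiece": "False",
        "authors": "The HuggingFace Team",
    },
)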
# coding=utf-8 # Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for {{cookiecutter.modelname}}.""" {%- if cookiecutter.tokenizer_type == "Based on BERT" %} from ...utils import logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 512, } PRETRAINED_INIT_CONFIGURATION = { "{{cookiecutter.checkpoint_identifier}}": {"do_lower_case": False}, } class {{cookiecutter.camelcase_modelname}}TokenizerFast(BertTokenizerFast): r""" Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). [`~{{cookiecutter.camelcase_modelname}}TokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer {%- elif cookiecutter.tokenizer_type == "Based on BART" %} from ...utils import logging from ..bart.tokenization_bart_fast import BartTokenizerFast from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.json", }, "merges_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/merges.txt", }, "tokenizer_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}TokenizerFast(BartTokenizerFast): r""" Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). [`~{{cookiecutter.camelcase_modelname}}TokenizerFast`] is identical to [`BartTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. 
Refer to superclass [`BartTokenizerFast`] for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer {%- elif cookiecutter.tokenizer_type == "Standalone" %} from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/vocab.txt", }, "tokenizer_file": { "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "{{cookiecutter.checkpoint_identifier}}": 1024, } class {{cookiecutter.camelcase_modelname}}TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" {{cookiecutter.modelname}} tokenizer (backed by HuggingFace's *tokenizers* library). Args: vocab_file (`str`): Path to the vocabulary file. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = {{cookiecutter.camelcase_modelname}}Tokenizer def __init__( self, vocab_file, merges_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs ): super().__init__( ByteLevelBPETokenizer( vocab_file=vocab_file, merges_file=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. {{cookiecutter.modelname}} does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] {% endif %}
transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_fast_{{cookiecutter.lowercase_modelname}}.py/0
{ "file_path": "transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/tokenization_fast_{{cookiecutter.lowercase_modelname}}.py", "repo_id": "transformers", "token_count": 2997 }
366
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class BenchmarkTest(unittest.TestCase): def check_results_dict_not_empty(self, results): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]): result = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(result) def test_inference_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_only_pretrain(self): MODEL_ID = "sgugger/tiny-distilbert-classification" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_torchscript(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_inference_fp16(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_model_no_architectures(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) # set architectures equal to `None` config.architectures = None benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_train_no_configs_fp16(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_inference_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_encoder_decoder_with_configs(self): MODEL_ID = "sshleifer/tinier_bart" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_train_encoder_decoder_with_configs(self): MODEL_ID = "sshleifer/tinier_bart" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_save_csv_files(self): MODEL_ID = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, 
"train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) benchmark.run() self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists()) def test_trace_memory(self): MODEL_ID = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(summary): self.assertTrue(hasattr(summary, "sequential")) self.assertTrue(hasattr(summary, "cumulative")) self.assertTrue(hasattr(summary, "current")) self.assertTrue(hasattr(summary, "total")) with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) result = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
transformers/tests/benchmark/test_benchmark.py/0
{ "file_path": "transformers/tests/benchmark/test_benchmark.py", "repo_id": "transformers", "token_count": 4937 }
367
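The tests above always go through the same two steps: build a PyTorchBenchmarkArguments object, pass it to PyTorchBenchmark, then inspect the nested result dictionaries that check_results_dict_not_empty walks. A minimal sketch of that flow outside a test is shown below; the layout of the result dict is inferred from that helper, not from separate documentation.

# Minimal sketch, mirroring the tests above; the nested layout of the result dict
# is inferred from `check_results_dict_not_empty`.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # same tiny checkpoint the tests use
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()

# time_inference_result maps model id -> {"bs": [...], "ss": [...], "result": {bs: {ss: value}}}
for model_id, model_result in results.time_inference_result.items():
    for bs, ss in zip(model_result["bs"], model_result["ss"]):
        print(model_id, bs, ss, model_result["result"][bs][ss])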
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
transformers/tests/fixtures/preprocessor_config.json/0
{ "file_path": "transformers/tests/fixtures/preprocessor_config.json", "repo_id": "transformers", "token_count": 41 }
368
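The two keys in the fixture above are what the Auto classes read when loading a preprocessor from a directory: feature_extractor_type selects the feature-extractor class and processor_class names the processor that wraps it. A minimal sketch of loading such a directory is shown below; the local path is hypothetical, and a full Wav2Vec2Processor would also need tokenizer files next to this JSON.

# Illustrative sketch; "./fixtures" is a hypothetical directory containing the
# preprocessor_config.json shown above.
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("./fixtures")
print(type(feature_extractor).__name__)  # expected: Wav2Vec2FeatureExtractor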
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeq2SeqLM, TFAutoModelForSpeechSeq2Seq, TFAutoModelForVision2Seq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, ) from transformers.modeling_tf_utils import keras if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): framework_dependent_parameters = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM, "AutoModelForVision2Seq": TFAutoModelForVision2Seq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def test_generate_tf_function_export_fixed_input_length(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_length = 2 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((None, input_length), tf.int32, name="input_ids"), tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2, 0], [102, 103]] dummy_attention_masks = [[1, 0], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for batch_size in range(1, len(dummy_input_ids) + 1): inputs = { "input_ids": tf.constant(dummy_input_ids[:batch_size]), "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, 
tf_model_outputs) @slow def test_generate_tf_function_export_fixed_batch_size(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") batch_size = 1 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"), tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2], [102, 103]] dummy_attention_masks = [[1], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for input_row in range(len(dummy_input_ids)): inputs = { "input_ids": tf.constant([dummy_input_ids[input_row]]), "attention_mask": tf.constant([dummy_attention_masks[input_row]]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow @require_tensorflow_text def test_generate_tf_function_export_with_tf_tokenizer(self): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir) class CompleteSentenceTransformer(keras.layers.Layer): def __init__(self): super().__init__() self.tokenizer = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read() ) self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") def call(self, inputs, *args, **kwargs): tokens = self.tokenizer.tokenize(inputs) input_ids, attention_mask = text.pad_model_inputs( tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id ) outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask) return self.tokenizer.detokenize(outputs) complete_model = CompleteSentenceTransformer() inputs = keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs") outputs = complete_model(inputs) keras_model = keras.Model(inputs, outputs) keras_model.save(tmp_dir) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has PT equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 14 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="tf") model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") eos_token_id = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [638, 198] with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = 
model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has PT equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="tf").input_ids bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart") output = bart_model.generate(input_ids).numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(TFBartForConditionalGeneration): def call(self, input_ids, foo=None, **kwargs): return super().call(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart") fake_output = bart_model.generate(input_ids, foo="bar").numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def call(self, input_ids, **kwargs): return super().call(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).numpy() with self.assertRaises(ValueError): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar")
transformers/tests/generation/test_tf_utils.py/0
{ "file_path": "transformers/tests/generation/test_tf_utils.py", "repo_id": "transformers", "token_count": 5122 }
369
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BART model. """ import copy import tempfile import unittest import timeout_decorator # noqa from transformers import BartConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoModelForSequenceClassification, BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, BartTokenizer, pipeline, ) from transformers.models.bart.modeling_bart import BartDecoder, BartEncoder, shift_tokens_right def prepare_bart_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class BartModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id 
self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id # forcing a certain token to be generated, sets all other tokens to -inf # if however the token to be generated is already at -inf then it can lead token # `nan` values and thus break generation self.forced_bos_token_id = None self.forced_eos_token_id = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_bart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return BartConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, forced_bos_token_id=self.forced_bos_token_id, forced_eos_token_id=self.forced_eos_token_id, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BartModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = BartModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder 
= model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = BartEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BartDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class BartHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_sequence_classification_forward(self): config, input_ids, batch_size = self._get_config_and_data() labels = _long_tensor([2] * batch_size).to(torch_device) model = BartForSequenceClassification(config) model.to(torch_device) outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels) expected_shape = torch.Size((batch_size, config.num_labels)) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_question_answering_forward(self): config, input_ids, batch_size = self._get_config_and_data() sequence_labels = ids_tensor([batch_size], 2).to(torch_device) model = BartForQuestionAnswering(config) model.to(torch_device) outputs = model( input_ids=input_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.assertEqual(outputs["start_logits"].shape, input_ids.shape) self.assertEqual(outputs["end_logits"].shape, input_ids.shape) self.assertIsInstance(outputs["loss"].item(), float) @timeout_decorator.timeout(1) def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device) lm_model = BartForConditionalGeneration(config) lm_model.to(torch_device) outputs = lm_model(input_ids=input_ids, labels=lm_labels) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_lm_uneven_forward(self): config = BartConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = 
BartForConditionalGeneration(config).to(torch_device) context = torch.tensor( [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long ) summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long) outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long) config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) lm_model = BartForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 generated_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @slow def test_tokenization(self): tokenizer = BartTokenizer.from_pretrained("facebook/bart-large") examples = [" Hello world", " DomDramg"] # need leading spaces for equality fairseq_results = [ torch.tensor([0, 20920, 232, 2]), torch.tensor([0, 11349, 495, 4040, 571, 2]), ] for ex, desired_result in zip(examples, fairseq_results): bart_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), bart_toks, prefix=ex) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = BartForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = BartForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_resize_tokens_embeddings_more(self): config, input_ids, _ = self._get_config_and_data() def _get_embs(m): return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone()) model = BartForConditionalGeneration(config).eval().to(torch_device) input, output = _get_embs(model) self.assertTrue(torch.eq(input, output).all()) new_vocab_size = 45 model.resize_token_embeddings(new_vocab_size) input_new, output_new = _get_embs(model) self.assertEqual(input_new.shape, (new_vocab_size, config.d_model)) self.assertEqual(output_new.shape, (new_vocab_size, config.d_model)) self.assertTrue(torch.eq(input_new, output_new).all()) @require_torch class BartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (BartModel, BartForConditionalGeneration, BartForSequenceClassification, BartForQuestionAnswering) if 
is_torch_available() else () ) all_generative_model_classes = (BartForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": BartForConditionalGeneration, "feature-extraction": BartModel, "fill-mask": BartForConditionalGeneration, "question-answering": BartForQuestionAnswering, "summarization": BartForConditionalGeneration, "text-classification": BartForSequenceClassification, "text-generation": BartForCausalLM, "text2text-generation": BartForConditionalGeneration, "translation": BartForConditionalGeneration, "zero-shot": BartForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False # Fix me Michael test_pruning = False def setUp(self): self.model_tester = BartModelTester(self) self.config_tester = ConfigTester(self, config_class=BartConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # BartForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (BartModel, BartForConditionalGeneration, BartForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BartForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @unittest.skip("Does not support conversations.") def test_pipeline_conversational(self): pass def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor 
values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @slow class FastIntegrationTests(unittest.TestCase): """These tests are useful for debugging since they operate on a model with 1 encoder layer and 1 decoder layer.""" @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @cached_property def xsum_1_1_model(self): return BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1") def test_xsum_1_1_generation(self): hf = self.xsum_1_1_model tok = self.tok ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. 
"We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes." ) EXPECTED = ( " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." ) dct = tok(ARTICLE, return_tensors="pt") generated_ids = hf.generate(**dct, num_beams=4) result = tok.batch_decode(generated_ids, skip_special_tokens=True)[0] assert EXPECTED == result def test_xsum_1_1_batch_generation(self): # test batch batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." 
In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. \"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. 
Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. 
Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="pt", padding="longest", truncation=True, ) generated_ids = self.xsum_1_1_model.generate(**batch, num_beams=4) result = self.tok.batch_decode(generated_ids, skip_special_tokens=True) assert ( result[0] == " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." ) assert ( result[1] == " An investigation into the crash that killed at least 10 people in the French capital has been" " released by the French police investigating the crash." ) def test_encoder_equiv(self): # test batch batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. 
"As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. 
\"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. 
French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. 
CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="pt", padding="longest", truncation=True, ) features = self.xsum_1_1_model.get_encoder()(**batch).last_hidden_state expected = [[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.0840, -0.2763]] assert_tensors_close(features[0, :3, :3], torch.tensor(expected), atol=1e-3) @require_torch @require_sentencepiece @require_tokenizers class BartModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return BartTokenizer.from_pretrained("facebook/bart-large") @slow def test_inference_no_head(self): model = BartModel.from_pretrained("facebook/bart-large").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = input_ids.ne(model.config.pad_token_id) with torch.no_grad(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3)) @slow def test_base_mask_filling(self): pbase = pipeline(task="fill-mask", model="facebook/bart-base") src_text = [" I went to the <mask>."] results = [x["token_str"] for x in pbase(src_text)] assert " bathroom" in results @slow def test_large_mask_filling(self): plarge = pipeline(task="fill-mask", model="facebook/bart-large") src_text = [" I went to the <mask>."] results = [x["token_str"] for x in plarge(src_text)] expected_results = [" bathroom", " gym", " wrong", " movies", " hospital"] self.assertListEqual(results, expected_results) @slow def test_mnli_inference(self): example_b = [0, 31414, 232, 328, 740, 1140, 69, 46078, 1588, 2, 1] input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], example_b]) model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli").to( torch_device ) # eval called in from_pre attention_mask = input_ids.ne(model.config.pad_token_id) # Test that model hasn't changed with torch.no_grad(): outputs = model(input_ids=input_ids, attention_mask=attention_mask) batched_logits = outputs.logits expected_shape = torch.Size((2, 3)) self.assertEqual(batched_logits.shape, expected_shape) expected_slice = torch.tensor([[0.1907, 1.4342, -1.0289]], device=torch_device) logits_arr = batched_logits[0].detach() # Test that padding does not change results input_ids_no_pad = _long_tensor([example_b[:-1]]) attention_mask_no_pad = input_ids_no_pad.ne(model.config.pad_token_id) with torch.no_grad(): logits2 = model(input_ids=input_ids_no_pad, attention_mask=attention_mask_no_pad).logits.squeeze() assert_tensors_close(batched_logits[1], logits2, atol=1e-3) assert_tensors_close(expected_slice, logits_arr, atol=1e-3) @slow def test_xsum_summarization_same_as_fairseq(self): model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-xsum").to(torch_device) tok = self.default_tokenizer PGE_ARTICLE = """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""" EXPECTED_SUMMARY = ( "California's largest power company has begun shutting off electricity to thousands of customers in the" " state." ) dct = tok.batch_encode_plus( [PGE_ARTICLE], max_length=1024, padding="max_length", truncation=True, return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=2, max_length=62, min_length=11, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True, decoder_start_token_id=model.config.eos_token_id, ) decoded = tok.batch_decode( hypotheses_batch, skip_special_tokens=True, ) self.assertEqual(EXPECTED_SUMMARY, decoded[0]) def test_xsum_config_generation_params(self): config = BartConfig.from_pretrained("facebook/bart-large-xsum") expected_params = {"num_beams": 6, "do_sample": False, "early_stopping": True, "length_penalty": 1.0} config_params = {k: getattr(config, k, "MISSING") for k, v in expected_params.items()} self.assertDictEqual(expected_params, config_params) @slow def test_cnn_summarization_same_as_fairseq(self): hf = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) tok = BartTokenizer.from_pretrained("facebook/bart-large") FRANCE_ARTICLE = ( # @noq " Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. 
Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. 
Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) # The below article tests that we don't add any hypotheses outside of the top n_beams IRAN_ARTICLE = ( " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. 
It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. 
Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) dct = tok.batch_encode_plus( [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY], max_length=1024, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="pt", ) self.assertEqual(1024, dct["input_ids"].shape[1]) hypotheses_batch = hf.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=2, ) assert hypotheses_batch[:, 1].eq(0).all().item() EXPECTED = [ "A French prosecutor says he is not aware of any video footage from on board the plane. Two German " "magazines claim to have found a cell phone video showing the crash. The publications say they watched " "the video, which was found by a source close to the investigation. All 150 on board Germanwings Flight " "9525 were killed.", "Palestinian Authority becomes 123rd member of the International Criminal Court. The move gives the court " "jurisdiction over alleged crimes in Palestinian territories. Israel and the United States opposed the " "Palestinians' efforts to join the body. 
But Palestinian Foreign Minister Riad al-Malki said it was a " "move toward greater justice.", "U.S. and its negotiating partners reached a strong framework agreement with Iran. Peter Bergen: The " "debate that has already begun will likely result in more heat than light. He says critics have made " "dubious assumptions and doubtful assertions. Bergen says the goal was to block Iran from building a " "nuclear weapon.", "Liana Barrientos, 39, has been married 10 times, sometimes within two weeks of each other. Prosecutors " "say the marriages were part of an immigration scam. She pleaded not guilty at State Supreme Court in the " "Bronx on Friday. If convicted, she faces up to four years in prison.", ] generated_summaries = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated_summaries == EXPECTED @slow def test_contrastive_search_bart(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." 
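            # NOTE: this is the same Liana Barrientos article used as ARTICLE_SUBWAY in
            # test_cnn_summarization_same_as_fairseq above; it is reused here as the contrastive-search input.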
) bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="pt" ).input_ids.to(torch_device) outputs = bart_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64, num_beams=1) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "Liana Barrientos, 39, pleaded not guilty to charges related to false marriage statements. " "Prosecutors say she married at least 10 times, sometimes within two weeks of each other. She is " "accused of being part of an immigration scam to get permanent residency. If convicted, she faces up " "to four years in" ], ) @slow def test_decoder_attention_mask(self): model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0).to( torch_device ) tokenizer = self.default_tokenizer sentence = "UN Chief Says There Is No <mask> in Syria" input_ids = tokenizer(sentence, return_tensors="pt").input_ids.to(torch_device) padding_size = 3 decoder_input_ids = torch.tensor( [ [model.config.decoder_start_token_id] + padding_size * [model.config.pad_token_id] + [model.config.bos_token_id] ], dtype=torch.long, device=torch_device, ) decoder_attention_mask = torch.where(decoder_input_ids == model.config.pad_token_id, 0, 1).to(torch_device) generated_ids = model.generate( input_ids=input_ids, use_cache=False, max_new_tokens=20, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) generated_sentence = tokenizer.batch_decode(generated_ids)[0] expected_sentence = "</s><pad><pad><pad><s>UN Chief Says There Is No Plan B for Peace in Syria</s>" self.assertEqual(generated_sentence, expected_sentence) class BartStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: 
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BartConfig( vocab_size=self.vocab_size, d_model=self.d_model, encoder_layers=self.decoder_layers, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BartDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BartDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( 
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class BartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (BartDecoder, BartForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (BartForCausalLM,) if is_torch_available() else ()
    fx_compatible = True
    test_pruning = False
    is_encoder_decoder = False
    test_missing_keys = False

    def setUp(
        self,
    ):
        self.model_tester = BartStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=BartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    def test_save_load_fast_init_from_base(self):
        pass
transformers/tests/models/bart/test_modeling_bart.py
{ "file_path": "transformers/tests/models/bart/test_modeling_bart.py", "repo_id": "transformers", "token_count": 35881 }
370
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BioGPT model. """ import math import unittest from transformers import BioGptConfig, is_sacremoses_available, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class BioGptModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return BioGptConfig( 
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BioGptModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BioGptForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_biogpt_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = BioGptModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_biogpt_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = BioGptModel(config=config).to(torch_device).eval() attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = 
ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = BioGptForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_biogpt_weight_initialization(self, config, *args): model = BioGptModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def create_and_check_biogpt_for_token_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels model = BioGptForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() and is_sacremoses_available() else {} ) test_pruning = False def setUp(self): self.model_tester = BioGptModelTester(self) self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def 
test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_biogpt_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs) def test_biogpt_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_biogpt_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs) def test_biogpt_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs) def test_biogpt_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs) @slow def test_batch_generation(self): model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") model.to(torch_device) tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") tokenizer.padding_side = "left" # Define PAD Token = EOS Token = 50256 tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BioGptModel.from_pretrained(model_name) self.assertIsNotNone(model) # Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common def test_biogpt_sequence_classification_model(self): 
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = BioGptForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model_for_multi_label with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common def test_biogpt_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = BioGptForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @require_torch class BioGptModelIntegrationTest(unittest.TestCase): @slow def test_inference_lm_head_model(self): model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") input_ids = torch.tensor([[2, 4805, 9, 656, 21]]) output = model(input_ids)[0] vocab_size = 42384 expected_shape = torch.Size((1, 5, vocab_size)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_biogpt_generation(self): tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device) output_ids = model.generate( **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, ) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
transformers/tests/models/biogpt/test_modeling_biogpt.py/0
{ "file_path": "transformers/tests/models/biogpt/test_modeling_biogpt.py", "repo_id": "transformers", "token_count": 8749 }
371
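The BioGPT tests above all route through BioGptModelTester, which builds a deliberately tiny random configuration so each check runs quickly. Below is a minimal sketch of that same pattern outside the unittest harness; it assumes only torch and a transformers install that includes BioGPT, and every size is illustrative (mirroring the tester) rather than taken from a pretrained checkpoint.

# A minimal, self-contained sketch of the BioGptModelTester pattern: build a tiny
# random-weight config, run a causal-LM forward pass, and check the logits shape.
# All sizes below are illustrative, not those of any pretrained model.
import torch

from transformers import BioGptConfig, BioGptForCausalLM

config = BioGptConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    max_position_embeddings=512,
)
model = BioGptForCausalLM(config).eval()

batch_size, seq_length = 2, 7
input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_length))
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids)

# Same shape check as create_and_check_for_causal_lm above
assert outputs.logits.shape == (batch_size, seq_length, config.vocab_size)
print("loss:", outputs.loss.item())

Keeping the configuration this small is what lets the shared ModelTesterMixin checks (caching with past_key_values, gradient checkpointing, weight initialization) stay fast enough for CI.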
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Blip model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipTextModel, BlipVisionModel, ) from transformers.models.blip.modeling_blip import BLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BlipProcessor class BlipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return BlipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = BlipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = 
(self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class BlipVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Blip does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (BlipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = BlipVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class BlipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, 
max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return BlipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, ) def create_and_check_model(self, config, input_ids, input_mask): model = BlipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BlipTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = BlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass 
@unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True) class BlipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlipModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BlipModel, "image-to-text": BlipForConditionalGeneration, "visual-question-answering": BlipForQuestionAnswering, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = BlipModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def 
test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Blip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def 
test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True) class BlipTextRetrievalModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipTextImageModelsModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, 
pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "labels": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict class BlipVQAModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BlipTextModelTester(parent, **text_kwargs) self.vision_model_tester = BlipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return BlipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = BlipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "labels": input_ids, "decoder_input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch @require_vision class BlipVQAModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForQuestionAnswering,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipVQAModelTester(self) def _prepare_inputs_for_vqa(self): _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict["labels"] = inputs_dict["input_ids"] inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict.pop("return_loss") return inputs_dict def test_class_name_consistency(self): """ Tests that all VQA models have a class name that ends with "ForQuestionAnswering" """ for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) self.assertTrue( model.__class__.__name__.endswith("ForQuestionAnswering"), f"Class name should end with 'ForVisualQuestionAnswering' got {model.__class__.__name__}", ) def test_training(self): """ Tests that all VQA models can be trained on a single batch """ for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()).to(torch_device) model.train() loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1]).loss loss.backward() # verify the gradients are not None for name, param in model.named_parameters(): self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}") def test_forward_signature(self): """ Test if the forward 
function has the expected arguments. """ for model_class in self.all_model_classes: model = model_class(self.model_tester.get_config()) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so args are the first n entries args = list(signature.parameters.keys()) expected_args = [ "input_ids", "attention_mask", "labels", "decoder_input_ids", "decoder_attention_mask", ] for arg in expected_args: self.assertTrue( arg in args, f"Argument {arg} of forward function signature should include {arg}. Found {args}.", ) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass @require_torch class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForImageTextRetrieval,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipTextRetrievalModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # override as the `logit_scale` parameter initilization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Blip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = 
loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BlipForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = BlipTextImageModelsModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="BlipModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: return for 
model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() # override as the `logit_scale` parameter initilization is different for Blip def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Blip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save BlipConfig and check if we can load BlipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = 
BlipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save BlipConfig and check if we can load BlipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BlipModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch @slow class BlipModelIntegrationTest(unittest.TestCase): def test_inference_image_captioning(self): model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() # image only inputs = processor(images=image, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) # Test output self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]) # image and context context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) # Test output self.assertEqual( predictions[0].tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) @require_torch_accelerator @require_torch_fp16 def test_inference_image_captioning_fp16(self): model = BlipForConditionalGeneration.from_pretrained( "Salesforce/blip-image-captioning-base", torch_dtype=torch.float16 ).to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") image = prepare_img() # image only inputs = processor(images=image, return_tensors="pt").to(torch_device, torch.float16) predictions = model.generate(**inputs) # Test output self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]) # image and context context = ["a picture of"] inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device, torch.float16) predictions = model.generate(**inputs) # Test output self.assertEqual( predictions[0].tolist(), [30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102], ) def test_inference_vqa(self): model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") image = prepare_img() text = "how many dogs are in the picture?" 
inputs = processor(image, text=text, return_tensors="pt").to(torch_device) out = model.generate(**inputs) # Test output self.assertEqual(out[0].tolist(), [30522, 1015, 102]) def test_inference_itm(self): model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco").to(torch_device) processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco") image = prepare_img() text = "A woman and her dog sitting in a beach" inputs = processor(image, text, return_tensors="pt").to(torch_device) out_itm = model(**inputs) out = model(**inputs, use_itm_head=False) expected_scores = torch.Tensor([[0.0029, 0.9971]]) self.assertTrue(torch.allclose(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3)) self.assertTrue(torch.allclose(out[0].cpu(), torch.Tensor([[0.5162]]), rtol=1e-3, atol=1e-3))
transformers/tests/models/blip/test_modeling_blip.py/0
{ "file_path": "transformers/tests/models/blip/test_modeling_blip.py", "repo_id": "transformers", "token_count": 24187 }
372
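The BLIP testers above share one composition pattern: a small BlipTextConfig and BlipVisionConfig are merged via BlipConfig.from_text_vision_configs, and the shapes of the image-text similarity logits are asserted. Below is a minimal sketch of that pattern, again assuming only torch and transformers; the sizes are illustrative, mirroring the testers, and do not correspond to any released checkpoint.

# A minimal, self-contained sketch of the BlipModelTester pattern: compose tiny
# text and vision configs, run one forward pass on random inputs, and check the
# image-text similarity logits. All sizes below are illustrative only.
import torch

from transformers import BlipConfig, BlipModel, BlipTextConfig, BlipVisionConfig

text_config = BlipTextConfig(
    vocab_size=99,
    hidden_size=32,
    projection_dim=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    bos_token_id=0,
)
vision_config = BlipVisionConfig(
    image_size=30,
    patch_size=2,
    hidden_size=32,
    projection_dim=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
)
config = BlipConfig.from_text_vision_configs(text_config, vision_config, projection_dim=32)
model = BlipModel(config).eval()

batch_size, seq_length = 4, 7
input_ids = torch.randint(0, text_config.vocab_size, (batch_size, seq_length))
attention_mask = torch.ones_like(input_ids)
pixel_values = torch.rand(batch_size, 3, vision_config.image_size, vision_config.image_size)

with torch.no_grad():
    outputs = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

# logits_per_image is (num_images, num_texts) and logits_per_text is its
# transpose, exactly what create_and_check_model asserts above
assert outputs.logits_per_image.shape == (batch_size, batch_size)
assert outputs.logits_per_text.shape == (batch_size, batch_size)

The same tiny text/vision composition is reused unchanged by the retrieval, conditional-generation, and VQA testers above; only the inputs_dict they build (labels, decoder_input_ids) differs per head.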
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Bros model. """ import copy import unittest from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BrosConfig, BrosForTokenClassification, BrosModel, BrosSpadeEEForTokenClassification, BrosSpadeELForTokenClassification, ) from transformers.models.bros.modeling_bros import ( BROS_PRETRAINED_MODEL_ARCHIVE_LIST, ) class BrosModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_bbox_first_token_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_bbox_first_token_mask = use_bbox_first_token_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 8], 1) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) bbox_first_token_mask = None if self.use_bbox_first_token_mask: bbox_first_token_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.bool).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) token_labels = None if 
self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) initial_token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) subsequent_token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return ( config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ) def get_config(self): return BrosConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): model = BrosModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): config.num_labels = self.num_labels model = BrosForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_spade_ee_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): config.num_labels = self.num_labels model = BrosSpadeEEForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, bbox_first_token_mask=bbox_first_token_mask, token_type_ids=token_type_ids, initial_token_labels=token_labels, subsequent_token_labels=token_labels, ) self.parent.assertEqual(result.initial_token_logits.shape, (self.batch_size, self.seq_length, self.num_labels)) self.parent.assertEqual( result.subsequent_token_logits.shape, (self.batch_size, self.seq_length, self.seq_length + 1) ) def create_and_check_for_spade_el_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): config.num_labels = self.num_labels model = BrosSpadeELForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, bbox_first_token_mask=bbox_first_token_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.seq_length + 1)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, 
bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class BrosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_mismatched_shapes = False all_model_classes = ( ( BrosForTokenClassification, BrosSpadeEEForTokenClassification, BrosSpadeELForTokenClassification, BrosModel, ) if is_torch_available() else () ) all_generative_model_classes = () if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": BrosModel, "token-classification": BrosForTokenClassification} if is_torch_available() else {} ) # BROS requires `bbox` in the inputs which doesn't fit into the above 2 pipelines' input formats. # see https://github.com/huggingface/transformers/pull/26294 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = BrosModelTester(self) self.config_tester = ConfigTester(self, config_class=BrosConfig, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class.__name__ in ["BrosForTokenClassification", "BrosSpadeELForTokenClassification"]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["bbox_first_token_mask"] = torch.ones( [self.model_tester.batch_size, self.model_tester.seq_length], dtype=torch.bool, device=torch_device, ) elif model_class.__name__ in ["BrosSpadeEEForTokenClassification"]: inputs_dict["initial_token_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["subsequent_token_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["bbox_first_token_mask"] = torch.ones( [self.model_tester.batch_size, self.model_tester.seq_length], dtype=torch.bool, device=torch_device, ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_spade_ee_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_spade_ee_token_classification(*config_and_inputs) def test_for_spade_el_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_spade_el_token_classification(*config_and_inputs) @slow def 
test_model_from_pretrained(self): for model_name in BROS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BrosModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_bros_batch_inputs(): attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) bbox = torch.tensor( [ [ [0.0000, 0.0000, 0.0000, 0.0000], [0.5223, 0.5590, 0.5787, 0.5720], [0.5853, 0.5590, 0.6864, 0.5720], [0.5853, 0.5590, 0.6864, 0.5720], [0.1234, 0.5700, 0.2192, 0.5840], [0.2231, 0.5680, 0.2782, 0.5780], [0.2874, 0.5670, 0.3333, 0.5780], [0.3425, 0.5640, 0.4344, 0.5750], [0.0866, 0.7770, 0.1181, 0.7870], [0.1168, 0.7770, 0.1522, 0.7850], [0.1535, 0.7750, 0.1864, 0.7850], [0.1890, 0.7750, 0.2572, 0.7850], [1.0000, 1.0000, 1.0000, 1.0000], ], [ [0.0000, 0.0000, 0.0000, 0.0000], [0.4396, 0.6720, 0.4659, 0.6850], [0.4698, 0.6720, 0.4843, 0.6850], [0.1575, 0.6870, 0.2021, 0.6980], [0.2047, 0.6870, 0.2730, 0.7000], [0.1299, 0.7010, 0.1430, 0.7140], [0.1299, 0.7010, 0.1430, 0.7140], [0.1562, 0.7010, 0.2441, 0.7120], [0.1562, 0.7010, 0.2441, 0.7120], [0.2454, 0.7010, 0.3150, 0.7120], [0.3176, 0.7010, 0.3320, 0.7110], [0.3333, 0.7000, 0.4029, 0.7140], [1.0000, 1.0000, 1.0000, 1.0000], ], ] ) input_ids = torch.tensor( [ [101, 1055, 8910, 1012, 5719, 3296, 5366, 3378, 2146, 2846, 10807, 13494, 102], [101, 2112, 1997, 3671, 6364, 1019, 1012, 5057, 1011, 4646, 2030, 2974, 102], ] ) return input_ids, bbox, attention_mask @require_torch class BrosModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = BrosModel.from_pretrained("jinho8345/bros-base-uncased").to(torch_device) input_ids, bbox, attention_mask = prepare_bros_batch_inputs() with torch.no_grad(): outputs = model( input_ids.to(torch_device), bbox.to(torch_device), attention_mask=attention_mask.to(torch_device), return_dict=True, ) # verify the logits expected_shape = torch.Size((2, 13, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.3074, 0.1363, 0.3143], [0.0925, -0.1155, 0.1050], [0.0221, 0.0003, 0.1285]] ).to(torch_device) torch.set_printoptions(sci_mode=False) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
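# The bbox-legality loop in BrosModelTester.prepare_config_and_inputs above can also be
# expressed without Python-level loops. The helper below is a hypothetical sketch (not part
# of the test suite) that relies on the module-level `torch` import and sorts each
# (x1, y1, x2, y2) pair with torch.minimum/torch.maximum so that x1 <= x2 and y1 <= y2,
# which is the same invariant the original per-element loop enforces.
def _make_bbox_legal(bbox):
    # bbox: (batch_size, seq_length, 8); only the first four coordinates are reordered,
    # mirroring the loop in prepare_config_and_inputs.
    legal = bbox.clone()
    legal[..., 0] = torch.minimum(bbox[..., 0], bbox[..., 2])
    legal[..., 2] = torch.maximum(bbox[..., 0], bbox[..., 2])
    legal[..., 1] = torch.minimum(bbox[..., 1], bbox[..., 3])
    legal[..., 3] = torch.maximum(bbox[..., 1], bbox[..., 3])
    return legal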
transformers/tests/models/bros/test_modeling_bros.py/0
{ "file_path": "transformers/tests/models/bros/test_modeling_bros.py", "repo_id": "transformers", "token_count": 8384 }
373
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch CLAP model. """ import inspect import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapTextModel, ClapTextModelWithProjection, ) from transformers.models.clap.modeling_clap import CLAP_PRETRAINED_MODEL_ARCHIVE_LIST class ClapAudioModelTester: def __init__( self, parent, batch_size=12, image_size=60, num_mel_bins=16, window_size=4, spec_size=64, patch_size=2, patch_stride=2, seq_length=16, freq_ratio=2, num_channels=3, is_training=True, hidden_size=32, patch_embeds_hidden_size=16, projection_dim=32, depths=[2, 2], num_hidden_layers=2, num_heads=[2, 2], intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_mel_bins = num_mel_bins self.window_size = window_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.depths = depths self.num_heads = num_heads self.num_attention_heads = num_heads[0] self.seq_length = seq_length self.spec_size = spec_size self.freq_ratio = freq_ratio self.patch_stride = patch_stride self.patch_embeds_hidden_size = patch_embeds_hidden_size self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, 1, self.hidden_size, self.num_mel_bins]) config = self.get_config() return config, input_features def get_config(self): return ClapAudioConfig( image_size=self.image_size, patch_size=self.patch_size, num_mel_bins=self.num_mel_bins, window_size=self.window_size, num_channels=self.num_channels, hidden_size=self.hidden_size, patch_stride=self.patch_stride, projection_dim=self.projection_dim, depths=self.depths, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, spec_size=self.spec_size, freq_ratio=self.freq_ratio, patch_embeds_hidden_size=self.patch_embeds_hidden_size, 
) def create_and_check_model(self, config, input_features): model = ClapAudioModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_features) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, input_features): model = ClapAudioModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_features) self.parent.assertEqual(result.audio_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_features = config_and_inputs inputs_dict = {"input_features": input_features} return config, inputs_dict @require_torch class ClapAudioModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLAP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ClapAudioModel, ClapAudioModelWithProjection) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ClapAudioModelTester(self) self.config_tester = ConfigTester(self, config_class=ClapAudioConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ClapAudioModel does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [2 * self.model_tester.patch_embeds_hidden_size, 2 * self.model_tester.patch_embeds_hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass") def test_retain_grad_hidden_states_attentions(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features"] self.assertListEqual(arg_names[:1], expected_arg_names) def 
test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass") def test_training(self): pass @unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ClapAudioModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_model_with_projection_from_pretrained(self): for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ClapAudioModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "audio_projection")) class ClapTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, projection_hidden_act="relu", ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.projection_hidden_act = projection_hidden_act def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return ClapTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, projection_hidden_act=self.projection_hidden_act, ) def create_and_check_model(self, config, input_ids, input_mask): model = ClapTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, input_ids, input_mask): model = ClapTextModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ClapTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ClapTextModel, ClapTextModelWithProjection) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = ClapTextModelTester(self) self.config_tester = ConfigTester(self, config_class=ClapTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass") def test_training(self): pass @unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ClapTextModel does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ClapTextModel.from_pretrained(model_name) 
self.assertIsNotNone(model) @slow def test_model_with_projection_from_pretrained(self): for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ClapTextModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "text_projection")) class ClapModelTester: def __init__(self, parent, text_kwargs=None, audio_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if audio_kwargs is None: audio_kwargs = {} self.parent = parent self.text_model_tester = ClapTextModelTester(parent, **text_kwargs) self.audio_model_tester = ClapAudioModelTester(parent, **audio_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() _, input_features = self.audio_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, input_features def get_config(self): return ClapConfig.from_text_audio_configs( self.text_model_tester.get_config(), self.audio_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, input_features): model = ClapModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, input_features, attention_mask) self.parent.assertEqual( result.logits_per_audio.shape, (self.audio_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.audio_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, input_features = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "input_features": input_features, "return_loss": True, } return config, inputs_dict @require_torch class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ClapModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ClapModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = ClapModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="ClapModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for CLAP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": 
self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] input_features = inputs_dict["input_features"] # CLAP needs input_features traced_model = torch.jit.trace(model, (input_ids, input_features)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_audio_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save ClapConfig and check if we can load ClapAudioConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) audio_config = ClapAudioConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.audio_config.to_dict(), audio_config.to_dict()) # Save ClapConfig and check if we can load ClapTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = ClapTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ClapModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch class ClapModelIntegrationTest(unittest.TestCase): paddings = ["repeatpad", "repeat", "pad"] def test_integration_unfused(self): EXPECTED_MEANS_UNFUSED = { "repeatpad": 0.0024, "pad": 0.0020, "repeat": 0.0023, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio_sample = librispeech_dummy[-1] model_id = 
"laion/clap-htsat-unfused" model = ClapModel.from_pretrained(model_id).to(torch_device) processor = ClapProcessor.from_pretrained(model_id) for padding in self.paddings: inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding).to( torch_device ) audio_embed = model.get_audio_features(**inputs) expected_mean = EXPECTED_MEANS_UNFUSED[padding] self.assertTrue( torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3) ) def test_integration_fused(self): EXPECTED_MEANS_FUSED = { "repeatpad": 0.00069, "repeat": 0.00196, "pad": -0.000379, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio_sample = librispeech_dummy[-1] model_id = "laion/clap-htsat-fused" model = ClapModel.from_pretrained(model_id).to(torch_device) processor = ClapProcessor.from_pretrained(model_id) for padding in self.paddings: inputs = processor( audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding, truncation="fusion" ).to(torch_device) audio_embed = model.get_audio_features(**inputs) expected_mean = EXPECTED_MEANS_FUSED[padding] self.assertTrue( torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3) ) def test_batched_fused(self): EXPECTED_MEANS_FUSED = { "repeatpad": 0.0010, "repeat": 0.0020, "pad": 0.0006, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]] model_id = "laion/clap-htsat-fused" model = ClapModel.from_pretrained(model_id).to(torch_device) processor = ClapProcessor.from_pretrained(model_id) for padding in self.paddings: inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding, truncation="fusion").to( torch_device ) audio_embed = model.get_audio_features(**inputs) expected_mean = EXPECTED_MEANS_FUSED[padding] self.assertTrue( torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3) ) def test_batched_unfused(self): EXPECTED_MEANS_FUSED = { "repeatpad": 0.0016, "repeat": 0.0019, "pad": 0.0019, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]] model_id = "laion/clap-htsat-unfused" model = ClapModel.from_pretrained(model_id).to(torch_device) processor = ClapProcessor.from_pretrained(model_id) for padding in self.paddings: inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding).to(torch_device) audio_embed = model.get_audio_features(**inputs) expected_mean = EXPECTED_MEANS_FUSED[padding] self.assertTrue( torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3) )
transformers/tests/models/clap/test_modeling_clap.py/0
{ "file_path": "transformers/tests/models/clap/test_modeling_clap.py", "repo_id": "transformers", "token_count": 13294 }
374
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch CvT model. """ import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class CvtConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "embed_dim")) self.parent.assertTrue(hasattr(config, "num_heads")) class CvtModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 32, 48], num_heads=[1, 2, 3], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, # Check ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_sizes = patch_sizes self.patch_stride = patch_stride self.patch_padding = patch_padding self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.num_channels = num_channels self.embed_dim = embed_dim self.num_heads = num_heads self.stride_kv = stride_kv self.depth = depth self.cls_token = cls_token self.attention_drop_rate = attention_drop_rate self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return CvtConfig( image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = CvtModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) image_size = (self.image_size, self.image_size) 
height, width = image_size[0], image_size[1] for i in range(len(self.depth)): height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = CvtForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Cvt does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = CvtModelTester(self) self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="Cvt does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="Cvt does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Cvt does not support input and output embeddings") def test_model_common_attributes(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depth) self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config 
del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CvtModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class CvtModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def test_inference_image_classification_head(self): model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/cvt/test_modeling_cvt.py/0
{ "file_path": "transformers/tests/models/cvt/test_modeling_cvt.py", "repo_id": "transformers", "token_count": 4333 }
375
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DecisionTransformer model. """ import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class DecisionTransformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.act_dim = act_dim self.state_dim = state_dim self.hidden_size = hidden_size self.max_length = max_length self.is_training = is_training def prepare_config_and_inputs(self): states = floats_tensor((self.batch_size, self.seq_length, self.state_dim)) actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim)) rewards = floats_tensor((self.batch_size, self.seq_length, 1)) returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1)) timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000) attention_mask = random_attention_mask((self.batch_size, self.seq_length)) config = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def get_config(self): return DecisionTransformerConfig( batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, ) def create_and_check_model( self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ): model = DecisionTransformerModel(config=config) model.to(torch_device) model.eval() result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask) self.parent.assertEqual(result.state_preds.shape, states.shape) self.parent.assertEqual(result.action_preds.shape, actions.shape) self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) = config_and_inputs inputs_dict = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, 
"attention_mask": attention_mask, } return config, inputs_dict @require_torch class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DecisionTransformerModel,) if is_torch_available() else () all_generative_model_classes = () pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids test_generate_without_input_ids = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features test_pruning = False test_resize_embeddings = False test_head_masking = False test_attention_outputs = False test_hidden_states_output = False test_inputs_embeds = False test_model_common_attributes = False test_gradient_checkpointing = False test_torchscript = False def setUp(self): self.model_tester = DecisionTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DecisionTransformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @require_torch class DecisionTransformerModelIntegrationTest(unittest.TestCase): @slow def test_autoregressive_prediction(self): """ An integration test that performs autoregressive prediction of state, action and return from a sequence of state, actions and returns. Test is performed over two timesteps. 
""" NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform TARGET_RETURN = 10 # defined by the RL environment, may be normalized model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert") model = model.to(torch_device) config = model.config torch.manual_seed(0) state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32) # env.reset() expected_outputs = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device ) returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1) states = state actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32) rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32) timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1) for step in range(NUM_STEPS): actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1) rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1) attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device) with torch.no_grad(): _, action_pred, _ = model( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) self.assertEqual(action_pred.shape, actions.shape) self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4)) state, reward, _, _ = ( # env.step(action) torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32), 1.0, False, {}, ) actions[-1] = action_pred[0, -1] states = torch.cat([states, state], dim=1) pred_return = returns_to_go[0, -1] - reward returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1) timesteps = torch.cat( [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1 )
transformers/tests/models/decision_transformer/test_modeling_decision_transformer.py/0
{ "file_path": "transformers/tests/models/decision_transformer/test_modeling_decision_transformer.py", "repo_id": "transformers", "token_count": 4241 }
376
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import DPRConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DPRContextEncoder, DPRQuestionEncoder, DPRReader, DPRReaderTokenizer from transformers.models.dpr.modeling_dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class DPRModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.projection_dim = projection_dim def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DPRConfig( projection_dim=self.projection_dim, vocab_size=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def create_and_check_context_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRContextEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_question_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRQuestionEncoder(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size)) def create_and_check_reader( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DPRReader(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @require_torch class DPRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DPRContextEncoder, DPRQuestionEncoder, DPRReader, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": DPRQuestionEncoder} if is_torch_available() else {} test_resize_embeddings = False test_missing_keys = False # why? 
test_pruning = False test_head_masking = False def setUp(self): self.model_tester = DPRModelTester(self) self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_context_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_context_encoder(*config_and_inputs) def test_question_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_question_encoder(*config_and_inputs) def test_reader_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_reader(*config_and_inputs) def test_init_changed_config(self): config = self.model_tester.prepare_config_and_inputs()[0] model = DPRQuestionEncoder(config=config) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = DPRQuestionEncoder.from_pretrained(tmp_dirname, projection_dim=512) self.assertIsNotNone(model) @slow def test_model_from_pretrained(self): for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRContextEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRQuestionEncoder.from_pretrained(model_name) self.assertIsNotNone(model) for model_name in DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = DPRReader.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class DPRModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", return_dict=False) model.to(torch_device) input_ids = torch.tensor( [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]], dtype=torch.long, device=torch_device ) # [CLS] hello, is my dog cute? [SEP] output = model(input_ids)[0] # embedding shape = (1, 768) # compare the actual values for a slice. expected_slice = torch.tensor( [ [ 0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933, 0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117, ] ], dtype=torch.float, device=torch_device, ) self.assertTrue(torch.allclose(output[:, :10], expected_slice, atol=1e-4)) @slow def test_reader_inference(self): tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base") model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base") model.to(torch_device) encoded_inputs = tokenizer( questions="What is love ?", titles="Haddaway", texts="What Is Love is a song recorded by the artist Haddaway", padding=True, return_tensors="pt", ) encoded_inputs.to(torch_device) outputs = model(**encoded_inputs) # compare the actual values for a slice. 
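# NOTE: the slices below are hard-coded reference values (presumably captured from a prior run of the released facebook/dpr-reader-single-nq-base checkpoint); the assertions require agreement to atol=1e-4.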
expected_start_logits = torch.tensor( [[-10.3005, -10.7765, -11.4872, -11.6841, -11.9312, -10.3002, -9.8544, -11.7378, -12.0821, -10.2975]], dtype=torch.float, device=torch_device, ) expected_end_logits = torch.tensor( [[-11.0684, -11.7041, -11.5397, -10.3465, -10.8791, -6.8443, -11.9959, -11.0364, -10.0096, -6.8405]], dtype=torch.float, device=torch_device, ) self.assertTrue(torch.allclose(outputs.start_logits[:, :10], expected_start_logits, atol=1e-4)) self.assertTrue(torch.allclose(outputs.end_logits[:, :10], expected_end_logits, atol=1e-4))
transformers/tests/models/dpr/test_modeling_dpr.py/0
{ "file_path": "transformers/tests/models/dpr/test_modeling_dpr.py", "repo_id": "transformers", "token_count": 5466 }
377
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ElectraConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.electra.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST class ElectraModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels, fake_token_labels, ) def get_config(self): return ElectraConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, _, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_electra_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = ElectraModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_electra_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ElectraModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_electra_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = ElectraForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_electra_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = ElectraForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_electra_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = 
self.num_labels model = ElectraForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_electra_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = ElectraForPreTraining(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) def create_and_check_electra_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = ElectraForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_electra_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = ElectraForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_electra_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_choices = self.num_choices model = ElectraForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ElectraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ElectraModel, ElectraForPreTraining, ElectraForMaskedLM, ElectraForCausalLM, ElectraForMultipleChoice, ElectraForTokenClassification, ElectraForSequenceClassification, ElectraForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ElectraModel, "fill-mask": ElectraForMaskedLM, "question-answering": 
ElectraForQuestionAnswering, "text-classification": ElectraForSequenceClassification, "text-generation": ElectraForCausalLM, "token-classification": ElectraForTokenClassification, "zero-shot": ElectraForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ElectraModelTester(self) self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_electra_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_model(*config_and_inputs) def test_electra_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_electra_model_as_decoder(*config_and_inputs) def test_electra_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_electra_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs) def test_for_pre_training(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_sequence_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_question_answering(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ElectraModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_electra_for_causal_lm(*config_and_inputs) @require_torch class ElectraModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = ElectraModel.from_pretrained("google/electra-small-discriminator") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 256)) 
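# google/electra-small-discriminator produces 256-dimensional hidden states and the input above contains 11 tokens, hence the expected (1, 11, 256) shape.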
self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.4471, 0.6821, -0.3265], [0.4627, 0.5255, -0.3668], [0.4532, 0.3313, -0.4344]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
transformers/tests/models/electra/test_modeling_electra.py/0
{ "file_path": "transformers/tests/models/electra/test_modeling_electra.py", "repo_id": "transformers", "token_count": 8357 }
378
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import random import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import FlavaImageProcessor, FlavaProcessor from transformers.models.flava.image_processing_flava import ( FLAVA_CODEBOOK_MEAN, FLAVA_CODEBOOK_STD, FLAVA_IMAGE_MEAN, FLAVA_IMAGE_STD, ) @require_vision class FlavaProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: skip self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "image_mean": FLAVA_IMAGE_MEAN, "image_std": FLAVA_IMAGE_STD, "do_normalize": True, "do_resize": True, "size": 224, "do_center_crop": True, "crop_size": 224, "input_size_patches": 14, "total_mask_patches": 75, "mask_group_max_patches": None, "mask_group_min_patches": 16, "mask_group_min_aspect_ratio": 0.3, "mask_group_max_aspect_ratio": None, "codebook_do_resize": True, "codebook_size": 112, "codebook_do_center_crop": True, "codebook_crop_size": 112, "codebook_do_map_pixels": True, "codebook_do_normalize": True, "codebook_image_mean": FLAVA_CODEBOOK_MEAN, "codebook_image_std": FLAVA_CODEBOOK_STD, } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return FlavaImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = FlavaProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = FlavaProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = FlavaProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = FlavaProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, BertTokenizer) self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, FlavaImageProcessor) self.assertIsInstance(processor_fast.image_processor, FlavaImageProcessor) def test_save_load_pretrained_additional_features(self): processor = FlavaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = FlavaProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, BertTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, FlavaImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) # With rest of the args random.seed(1234) input_feat_extract = image_processor( image_input, return_image_mask=True, return_codebook_pixels=True, return_tensors="np" ) random.seed(1234) input_processor = processor( images=image_input, return_image_mask=True, return_codebook_pixels=True, return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = 
tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # add extra args inputs = processor(text=input_str, images=image_input, return_codebook_pixels=True, return_image_mask=True) self.assertListEqual( list(inputs.keys()), [ "input_ids", "token_type_ids", "attention_mask", "pixel_values", "codebook_pixel_values", "bool_masked_pos", ], ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/flava/test_processor_flava.py/0
{ "file_path": "transformers/tests/models/flava/test_processor_flava.py", "repo_id": "transformers", "token_count": 4041 }
379
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpt2 import TFGPT2Tokenizer TOKENIZER_CHECKPOINTS = ["openai-community/gpt2"] TINY_MODEL_CHECKPOINT = "openai-community/gpt2" if is_tf_available(): class ModelToSave(tf.Module): def __init__(self, tokenizer): super().__init__() self.tokenizer = tokenizer config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT) self.model = TFGPT2LMHeadModel.from_config(config) @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),)) def serving(self, text): tokenized = self.tokenizer(text) input_ids_dense = tokenized["input_ids"].to_tensor() input_mask = tf.cast(input_ids_dense > 0, tf.int32) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"] return outputs @require_tf @require_keras_nlp class GPTTokenizationTest(unittest.TestCase): # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints, # so that's what we focus on here. def setUp(self): super().setUp() self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)] self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers) == len(self.tf_tokenizers) self.test_sentences = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1])) def test_output_equivalence(self): for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers): for test_inputs in self.test_sentences: python_outputs = tokenizer([test_inputs], return_tensors="tf") tf_outputs = tf_tokenizer([test_inputs]) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors python_outputs_values = python_outputs[key].numpy() tf_outputs_values = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values)) @slow def test_graph_mode(self): for tf_tokenizer in self.tf_tokenizers: compiled_tokenizer = tf.function(tf_tokenizer) for test_inputs in self.test_sentences: test_inputs = tf.constant(test_inputs) compiled_outputs = compiled_tokenizer(test_inputs) eager_outputs = tf_tokenizer(test_inputs) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def test_saved_model(self): for tf_tokenizer in self.tf_tokenizers: model = ModelToSave(tokenizer=tf_tokenizer) test_inputs = tf.convert_to_tensor([self.test_sentences[0]]) out = model.serving(test_inputs) # Build model with some sample inputs with TemporaryDirectory() as tempdir: save_path = Path(tempdir) / "saved.model" tf.saved_model.save(model, 
save_path, signatures={"serving_default": model.serving}) loaded_model = tf.saved_model.load(save_path) loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output)) @slow def test_from_config(self): for tf_tokenizer in self.tf_tokenizers: test_inputs = tf.convert_to_tensor([self.test_sentences[0]]) out = tf_tokenizer(test_inputs) # Build model with some sample inputs config = tf_tokenizer.get_config() model_from_config = TFGPT2Tokenizer.from_config(config) from_config_output = model_from_config(test_inputs) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key])) @slow def test_padding(self): for tf_tokenizer in self.tf_tokenizers: # for the test to run tf_tokenizer.pad_token_id = 123123 for max_length in [3, 5, 1024]: test_inputs = tf.convert_to_tensor([self.test_sentences[0]]) out = tf_tokenizer(test_inputs, max_length=max_length) out_length = out["input_ids"].numpy().shape[1] assert out_length == max_length
transformers/tests/models/gpt2/test_tokenization_gpt2_tf.py/0
{ "file_path": "transformers/tests/models/gpt2/test_tokenization_gpt2_tf.py", "repo_id": "transformers", "token_count": 2530 }
380
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import AutoTokenizer, GPTJConfig, is_tf_available from transformers.testing_utils import require_tf, slow, tooslow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.gptj.modeling_tf_gptj import ( TFGPTJForCausalLM, TFGPTJForQuestionAnswering, TFGPTJForSequenceClassification, TFGPTJModel, shape_list, ) class TFGPTJModelTester: def __init__(self, parent): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.rotary_dim = 4 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.bos_token_id = self.vocab_size - 1 self.eos_token_id = self.vocab_size - 1 self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, return_dict=True, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, 
input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_gptj_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPTJModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs = [input_ids, None, input_mask] # None is the input for 'past' result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_gptj_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPTJModel(config=config) # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_gptj_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPTJModel(config=config) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat([attn_mask, tf.ones((shape_list(attn_mask)[0], 1), dtype=tf.int32)], axis=1) # get two 
different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-12) def create_and_check_gptj_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPTJModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] token_type_ids = token_type_ids[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_token_types = ids_tensor((self.batch_size, 3), self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past_key_values, )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_gptj_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPTJForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class TFGPTJModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFGPTJForCausalLM, TFGPTJForSequenceClassification, TFGPTJForQuestionAnswering, TFGPTJModel) if is_tf_available() else () ) all_generative_model_classes = (TFGPTJForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFGPTJModel, "question-answering": TFGPTJForQuestionAnswering, "text-classification": 
TFGPTJForSequenceClassification, "text-generation": TFGPTJForCausalLM, "zero-shot": TFGPTJForSequenceClassification, } if is_tf_available() else {} ) test_onnx = False test_pruning = False test_missing_keys = False test_head_masking = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = TFGPTJModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTJConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_gptj_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model(*config_and_inputs) def test_gptj_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_past(*config_and_inputs) def test_gptj_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_attention_mask_past(*config_and_inputs) def test_gptj_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model_past_large_inputs(*config_and_inputs) def test_gptj_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_lm_head_model(*config_and_inputs) @slow @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) > 0, "skip testing on GPU for now to avoid GPU OOM.", ) def test_model_from_pretrained(self): model = TFGPTJModel.from_pretrained("EleutherAI/gpt-j-6B", from_pt=True) self.assertIsNotNone(model) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.") def test_resize_token_embeddings(self): super().test_resize_token_embeddings() @require_tf @tooslow # Marked as @tooslow due to GPU OOM -- but still useful to run locally. Requires ~39GB of RAM. class TFGPTJModelLanguageGenerationTest(unittest.TestCase): def test_lm_generate_gptj(self): model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", from_pt=True) input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32) # The dog # The dog is a man's best friend. 
It is a loyal companion, and it is a friend expected_output_ids = [464, 3290, 318, 257, 582, 338, 1266, 1545, 13, 632, 318, 257, 9112, 15185, 11, 290, 340, 318, 257, 1545] # fmt: skip output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) def test_gptj_sample(self): tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16") model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", from_pt=True) tokenized = tokenizer("Today is a nice day and", return_tensors="tf") # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): output_ids = model.generate(**tokenized, do_sample=True, seed=[42, 0]) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STR = "Today is a nice day and I’m going to go for a walk. I’" self.assertEqual(output_str, EXPECTED_OUTPUT_STR) def _get_beam_search_test_objects(self): model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", revision="float16", from_pt=True) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B", revision="float16") tokenizer.padding_side = "left" # Define PAD Token = EOS Token = 50256 tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] expected_output_sentences = [ "Hello, my dog is a little over a year old and has been diagnosed with hip dysplasia", "Today, I’m going to be talking about a topic that’", ] return model, tokenizer, sentences, expected_output_sentences def test_batch_beam_search(self): # Confirms that we get the expected results with left-padded beam search model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects() inputs = tokenizer(sentences, return_tensors="tf", padding=True) outputs = model.generate(**inputs, do_sample=False, num_beams=2) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(expected_output_sentences, batch_out_sentence) def test_batch_left_padding(self): # Confirms that left-padding is working properly model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects() inputs = tokenizer(sentences, return_tensors="tf", padding=True) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf") output_non_padded = model.generate(**inputs_non_padded, do_sample=False, num_beams=2) num_paddings = ( shape_list(inputs_non_padded["input_ids"])[-1] - tf.reduce_sum(tf.cast(inputs["attention_mask"][-1], tf.int64)).numpy() ) inputs_padded = tokenizer(sentences[1], return_tensors="tf") output_padded = model.generate( **inputs_padded, do_sample=False, num_beams=2, max_length=model.config.max_length - num_paddings ) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) self.assertListEqual(expected_output_sentences, [non_padded_sentence, padded_sentence]) def test_xla_beam_search(self): # Confirms that XLA is working properly model, tokenizer, sentences, expected_output_sentences = self._get_beam_search_test_objects() inputs = tokenizer(sentences, return_tensors="tf", padding=True) xla_generate = tf.function(model.generate, jit_compile=True) outputs_xla = xla_generate(**inputs, do_sample=False, num_beams=2) xla_sentence = 
tokenizer.batch_decode(outputs_xla, skip_special_tokens=True) self.assertListEqual(expected_output_sentences, xla_sentence)
transformers/tests/models/gptj/test_modeling_tf_gptj.py/0
{ "file_path": "transformers/tests/models/gptj/test_modeling_tf_gptj.py", "repo_id": "transformers", "token_count": 8856 }
381
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch KOSMOS-2 model. """ import copy import inspect import os import tempfile import unittest import numpy as np import requests from transformers import AutoModelForVision2Seq, AutoProcessor, Kosmos2Config from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2TextConfig, Kosmos2VisionConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Kosmos2ForConditionalGeneration, Kosmos2Model from transformers.models.kosmos2.modeling_kosmos2 import KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class Kosmos2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=4, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Kosmos2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict class Kosmos2TextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, 
num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Kosmos2TextConfig( vocab_size=self.vocab_size, embed_dim=self.hidden_size, layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict class Kosmos2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, latent_query_num=3, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Kosmos2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Kosmos2VisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.latent_query_num = latent_query_num self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() # build `image_embeds_position_mask` image_embeds_position_mask = torch.zeros_like(input_ids) image_embeds_position_mask[:, 1 : 1 + self.latent_query_num :] = 1 config = self.get_config() return config, input_ids, attention_mask, image_embeds_position_mask, pixel_values def get_config(self): return Kosmos2Config( self.text_model_tester.get_config().to_dict(), self.vision_model_tester.get_config().to_dict(), latent_query_num=self.latent_query_num, ) def create_and_check_model(self, config, input_ids, attention_mask, image_embeds_position_mask, pixel_values): model = Kosmos2Model(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, image_embeds_position_mask, attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size), ) self.parent.assertEqual( result.image_embeds.shape, (self.text_model_tester.batch_size, 
self.latent_query_num, self.text_model_tester.hidden_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, image_embeds_position_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "image_embeds_position_mask": image_embeds_position_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class Kosmos2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Kosmos2Model, Kosmos2ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (Kosmos2ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": Kosmos2Model, "image-to-text": Kosmos2ForConditionalGeneration} if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # TODO: `image-to-text` pipeline for this model needs Processor. def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return pipeline_test_casse_name == "ImageToTextPipelineTests" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class.__name__ == "Kosmos2ForConditionalGeneration": inputs_dict["labels"] = torch.zeros( (self.model_tester.text_model_tester.batch_size, self.model_tester.text_model_tester.seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = Kosmos2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Kosmos2Config, hidden_size=37) # overwrite from common to skip `image_to_text_projection.latent_query` def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "image_to_text_projection.latent_query": # The original code use ` nn.Parameter(torch.randn(...))` for which this test won't pass. 
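# A randn-initialized parameter is not governed by initializer_range, so under _config_zero_init its mean does not round to 0.0 or 1.0 and the check below would fail for it.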
continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_save_without_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.text_config.tie_word_embeddings = False for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) # overwrite from common in order to use `self.model_tester.text_model_tester.num_hidden_layers` def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.text_model_tester.num_hidden_layers + 1, ) self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.text_model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.text_model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # overwrite from common in order to use `config.text_config.vocab_size` instead of `config.vocab_size` def test_tie_model_weights(self): if not self.test_torchscript: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) # Check that the embedding layer and decoding layer are the same in size and in value # 
self.assertTrue(check_same_values(embeddings, decoding)) # # Check that after modification, they remain the same. # embeddings.weight.data.div_(2) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) # self.assertTrue(check_same_values(embeddings, decoding)) # # Check that after modification, they remain the same. # decoding.weight.data.div_(4) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) # self.assertTrue(check_same_values(embeddings, decoding)) # Check that after resize they remain tied. model_tied.resize_token_embeddings(config.text_config.vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) # decoding.weight.data.mul_(20) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) @slow def test_model_from_pretrained(self): for model_name in KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Kosmos2Model.from_pretrained(model_name) self.assertIsNotNone(model) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input, inputs["input_ids"], inputs["image_embeds_position_mask"]) traced_model = torch.jit.trace( model, (main_input, inputs["input_ids"], inputs["image_embeds_position_mask"]) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/Kosmos2-test-image/resolve/main/demo.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch @slow class Kosmos2ModelIntegrationTest(unittest.TestCase): def run_example(self, prompt, image, model, processor): inputs = processor(text=prompt, images=image, return_tensors="pt", padding=True).to(torch_device) generation_outputs = model.generate( pixel_values=inputs["pixel_values"], input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], image_embeds=None, image_embeds_position_mask=inputs["image_embeds_position_mask"], use_cache=True, max_new_tokens=128, output_scores=True, return_dict_in_generate=True, ) scores = generation_outputs.scores generated_ids = generation_outputs.sequences generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) # Specify `cleanup_and_extract=False` in order to see the raw model generation. processed_text = [processor.post_process_generation(x, cleanup_and_extract=False) for x in generated_text] # By default, the generated text is cleanup and the entities are extracted. final_text_with_entities = [processor.post_process_generation(x) for x in generated_text] return scores, generated_ids, generated_text, processed_text, final_text_with_entities def test_snowman_image_captioning(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png" image = Image.open(requests.get(url, stream=True).raw) image.save("new_image.jpg") image = Image.open("new_image.jpg") model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") prompt = "<grounding>An image of" scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, image, model, processor ) processed_text = processed_text[0] final_text, entities = final_text_with_entities[0] np.testing.assert_allclose( torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(), np.array( [ [-1.5672581195831299, -5.007406711578369, 4.36448860168457], [-2.147017002105713, -4.966302871704102, 4.592559337615967], [-0.9352350831031799, -4.688288688659668, 6.240612983703613], ] ), atol=1e-5, ) np.testing.assert_allclose( torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(), np.array( [ [2.9916205406188965, 2.481820583343506, 4.646594524383545], [-2.8381078243255615, -2.9687185287475586, -2.6926779747009277], [-2.8909168243408203, -3.2228589057922363, -1.7056822776794434], ] ), atol=1e-5, ) # fmt: off EXPECTED_IDS = [ [ 0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 712, 1648, 9, 64007, 10, 43867, 64008, 64009, 64057, 64876, 64010, 5950, 597, 32, 64007, 10, 646, 64008, 64009, 64018, 64924, 64010, 4, 2 ] ] # fmt: on self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS) EXPECTED_PROCESSED_TEXT = ( "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> " "warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>." 
) self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT) self.assertEqual(final_text, "An image of a snowman warming himself by a fire.") EXPECTED_ENTITIES = [ ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), ] self.assertListEqual(entities, EXPECTED_ENTITIES) # test with the detail caption generation prompt = "<grounding>Describe this image in detail:" scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, image, model, processor ) processed_text = processed_text[0] final_text, entities = final_text_with_entities[0] np.testing.assert_allclose( torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(), np.array( [ [-0.9093570113182068, -4.578373908996582, 5.96360969543457], [2.452126979827881, -4.090598106384277, 8.738677024841309], [-0.7624598741531372, -4.771658897399902, 6.576295852661133], ] ), atol=1e-5, ) np.testing.assert_allclose( torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(), np.array( [ [-1.673659086227417, -2.162452220916748, -1.95430588722229], [-2.006824493408203, -2.2038745880126953, -1.24686861038208], [-3.2783470153808594, -2.814181089401245, -1.390632152557373], ] ), atol=1e-5, ) # fmt: off EXPECTED_IDS_LONG = [ [ 0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 34645, 247, 38, 1648, 12, 3391, 55, 24, 1648, 1338, 10, 43867, 1280, 32, 64007, 10, 30879, 64008, 64009, 64018, 65020, 64010, 12, 5, 1842, 4, 71, 17, 1679, 64007, 10, 3958, 64008, 64009, 64061, 64263, 64010, 6, 64007, 15719, 64008, 64009, 64253, 64617, 64010, 6, 8, 64007, 9626, 64008, 64009, 64413, 64545, 64010, 6, 23, 64007, 10, 4363, 64008, 64009, 64623, 64885, 64010, 2255, 8, 64007, 10, 3486, 64008, 64009, 64809, 65036, 64010, 1560, 2255, 4, 24, 43867, 1684, 7, 27, 3774, 5, 10356, 9, 5, 646, 6, 8, 22, 1684, 7, 30, 10, 2007, 8, 16239, 4337, 4, 2 ] ] # fmt: on self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS_LONG) EXPECTED_PROCESSED_TEXT_LONG = ( "<grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire" "</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat" "</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object>" "<patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400>" "<patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872>" "</object> nearby and<phrase> a cup</phrase><object><patch_index_0796><patch_index_1023></object> placed " "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " "atmosphere." ) self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT_LONG) EXPECTED_FINAL_TEXT_LONG = ( "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." 
) self.assertEqual(final_text, EXPECTED_FINAL_TEXT_LONG) EXPECTED_ENTITIES_LONG = [ ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), ("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), ] self.assertListEqual(entities, EXPECTED_ENTITIES_LONG) def test_snowman_image_captioning_batch(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png" image = Image.open(requests.get(url, stream=True).raw) image.save("new_image.jpg") image = Image.open("new_image.jpg") model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device) prompt = ["<grounding>Describe this image in detail:", "<grounding>An image of"] # left padding processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, [image] * len(prompt), model, processor ) all_final_text = [x[0] for x in final_text_with_entities] all_entities = [x[1] for x in final_text_with_entities] # left padding gives identical results as non-padding EXPECTED_PROCESSED_TEXT_0 = ( "<grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire" "</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat" "</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object>" "<patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400>" "<patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872>" "</object> nearby and<phrase> a cup</phrase><object><patch_index_0796><patch_index_1023></object> placed " "nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy " "atmosphere." ) EXPECTED_PROCESSED_TEXT_1 = ( "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> " "warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>." ) self.assertListEqual(processed_text, [EXPECTED_PROCESSED_TEXT_0, EXPECTED_PROCESSED_TEXT_1]) EXPECTED_FINAL_TEXT_0 = ( "Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is " "wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be " "enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere." ) EXPECTED_FINAL_TEXT_1 = "An image of a snowman warming himself by a fire." 
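        # The left-padded batch outputs are expected to reproduce the single-example expectations defined above.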
self.assertListEqual(all_final_text, [EXPECTED_FINAL_TEXT_0, EXPECTED_FINAL_TEXT_1]) EXPECTED_ENTITIES_0 = [ ("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]), ("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]), ("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]), ("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]), ("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]), ("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]), ] EXPECTED_ENTITIES_1 = [ ("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]), ] self.assertListEqual(all_entities, [EXPECTED_ENTITIES_0, EXPECTED_ENTITIES_1]) # right padding processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example( prompt, [image] * len(prompt), model, processor ) all_final_text = [x[0] for x in final_text_with_entities] all_entities = [x[1] for x in final_text_with_entities] # For right padding, only the non-padded sequences will give the same results as non-padding self.assertEqual(processed_text[0], EXPECTED_PROCESSED_TEXT_0) self.assertEqual(all_final_text[0], EXPECTED_FINAL_TEXT_0) self.assertListEqual(all_entities[0], EXPECTED_ENTITIES_0)
transformers/tests/models/kosmos2/test_modeling_kosmos2.py/0
{ "file_path": "transformers/tests/models/kosmos2/test_modeling_kosmos2.py", "repo_id": "transformers", "token_count": 15852 }
382
# coding=utf-8 # Copyright 2022 The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MarkupLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, MarkupLMForTokenClassification, MarkupLMModel, ) # TODO check dependencies from transformers import MarkupLMFeatureExtractor, MarkupLMProcessor, MarkupLMTokenizer class MarkupLMModelTester: """You can also import this e.g from .test_modeling_markuplm import MarkupLMModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, max_xpath_tag_unit_embeddings=20, max_xpath_subs_unit_embeddings=30, tag_pad_id=2, subs_pad_id=2, max_depth=10, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings self.tag_pad_id = tag_pad_id self.subs_pad_id = subs_pad_id self.max_depth = max_depth def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) xpath_tags_seq = ids_tensor( [self.batch_size, self.seq_length, self.max_depth], self.max_xpath_tag_unit_embeddings ) xpath_subs_seq = ids_tensor( [self.batch_size, self.seq_length, self.max_depth], self.max_xpath_subs_unit_embeddings ) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) 
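        # Sequence- and token-level labels are only built when use_labels is True; they feed the sequence/QA heads and the token classification head below.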
sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return ( config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ) def get_config(self): return MarkupLMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, max_xpath_tag_unit_embeddings=self.max_xpath_tag_unit_embeddings, max_xpath_subs_unit_embeddings=self.max_xpath_subs_unit_embeddings, tag_pad_id=self.tag_pad_id, subs_pad_id=self.subs_pad_id, max_depth=self.max_depth, ) def create_and_check_model( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): model = MarkupLMModel(config=config) model.to(torch_device) model.eval() print("Configs:", model.config.tag_pad_id, model.config.subs_pad_id) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_sequence_classification( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): config.num_labels = self.num_labels model = MarkupLMForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): config.num_labels = self.num_labels model = MarkupLMForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): model = MarkupLMForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): 
config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "xpath_tags_seq": xpath_tags_seq, "xpath_subs_seq": xpath_subs_seq, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class MarkupLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MarkupLMModel, MarkupLMForSequenceClassification, MarkupLMForTokenClassification, MarkupLMForQuestionAnswering, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": MarkupLMModel, "question-answering": MarkupLMForQuestionAnswering, "text-classification": MarkupLMForSequenceClassification, "token-classification": MarkupLMForTokenClassification, "zero-shot": MarkupLMForSequenceClassification, } if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): # ValueError: Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` # (batch of pretokenized examples). return True def setUp(self): self.model_tester = MarkupLMModelTester(self) self.config_tester = ConfigTester(self, config_class=MarkupLMConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def prepare_html_string(): html_string = """ <!DOCTYPE html> <html> <head> <title>Page Title</title> </head> <body> <h1>This is a Heading</h1> <p>This is a paragraph.</p> </body> </html> """ return html_string @require_torch class MarkupLMModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): # TODO use from_pretrained here feature_extractor = MarkupLMFeatureExtractor() tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base") return MarkupLMProcessor(feature_extractor, tokenizer) @slow def test_forward_pass_no_head(self): model = MarkupLMModel.from_pretrained("microsoft/markuplm-base").to(torch_device) processor = self.default_processor inputs = processor(prepare_html_string(), return_tensors="pt") inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the last hidden states expected_shape = torch.Size([1, 14, 768]) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.0675, -0.0052, 0.5001], [-0.2281, 0.0802, 0.2192], [-0.0583, -0.3311, 0.1185]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/markuplm/test_modeling_markuplm.py/0
{ "file_path": "transformers/tests/models/markuplm/test_modeling_markuplm.py", "repo_id": "transformers", "token_count": 6144 }
383
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class TFOPTModelTester: config_cls = OPTConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) config = self.config_cls( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, ) inputs_dict = prepare_opt_inputs_dict(config, input_ids) return config, inputs_dict def 
check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embedding weights if they don't exist yet,
                # and then retry to get the attribute once built.
                model.build_in_name_scope()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tf class TFOPTHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2 input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1) batch_size = input_ids.shape[0] config = OPTConfig( vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size @require_sentencepiece @require_tf class OPTModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = TFOPTModel.from_pretrained("facebook/opt-350m") input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.not_equal(input_ids, model.config.pad_token_id) with tf.GradientTape(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = (1, 11, 512) self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3)) xla_generate = tf.function(model, jit_compile=True) output = xla_generate(input_ids, attention_mask)[0] self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2)) @require_tf @slow class TFOPTEmbeddingsTest(unittest.TestCase): def setUp(self): super().setUp() self.path_model = "facebook/opt-350m" def test_logits(self): model = TFOPTForCausalLM.from_pretrained(self.path_model) tokenizer = GPT2Tokenizer.from_pretrained(self.path_model) prompts = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False) logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1) logits_meta = tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4)) xla_generate = tf.function(model, jit_compile=True) logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1) self.assertTrue(np.allclose(logits, logits_meta, 
atol=1e-4)) @require_tf @slow class TFOPTGenerationTest(unittest.TestCase): @property def prompts(self): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def test_generation_pre_attn_layer_norm(self): model_id = "facebook/opt-125m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="tf").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) def test_batch_generation(self): model_id = "facebook/opt-350m" tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="tf", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"]) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1], tf.int64) ) inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence]) def test_generation_post_attn_layer_norm(self): model_id = "facebook/opt-350m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="tf").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
transformers/tests/models/opt/test_modeling_tf_opt.py/0
{ "file_path": "transformers/tests/models/opt/test_modeling_tf_opt.py", "repo_id": "transformers", "token_count": 7496 }
384
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Pvt model. """ import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import PvtConfig, PvtForImageClassification, PvtImageProcessor, PvtModel from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES from transformers.models.pvt.modeling_pvt import PVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class PvtConfigTester(ConfigTester): def run_common_tests(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class PvtModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return PvtConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = PvtModel(config=config) 
model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertIsNotNone(result.last_hidden_state) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = PvtForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = PvtForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PvtModel, PvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": PvtModel, "image-classification": PvtForImageClassification} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = PvtModelTester(self) self.config_tester = PvtConfigTester(self, config_class=PvtConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip("Pvt does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("Pvt does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = sum(self.model_tester.depths) + 1 self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.batch_size, (self.model_tester.image_size // 4) ** 2, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in PVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = PvtModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class PvtModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_classification(self): # only resize + normalize image_processor = PvtImageProcessor.from_pretrained("Zetatech/pvt-tiny-224") model = PvtForImageClassification.from_pretrained("Zetatech/pvt-tiny-224").to(torch_device).eval() image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_model(self): model = PvtModel.from_pretrained("Zetatech/pvt-tiny-224").to(torch_device).eval() image_processor = PvtImageProcessor.from_pretrained("Zetatech/pvt-tiny-224") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values) # verify the logits expected_shape = torch.Size((1, 50, 512)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. """ model = PvtForImageClassification.from_pretrained("Zetatech/pvt-tiny-224", torch_dtype=torch.float16) model.to(torch_device) image_processor = PvtImageProcessor(size=224) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device, dtype=torch.float16) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values)
transformers/tests/models/pvt/test_modeling_pvt.py/0
{ "file_path": "transformers/tests/models/pvt/test_modeling_pvt.py", "repo_id": "transformers", "token_count": 5192 }
385
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "junnyu/roformer_chinese_small"
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        for cls in [RoFormerTokenizer, RoFormerTokenizerFast]:
            original = cls.from_pretrained("alchemab/antiberta2")
            self.assertEqual(original.encode("生活的真谛是"), [1, 4, 4, 4, 4, 4, 4, 2])

            with tempfile.TemporaryDirectory() as tmp_dir:
                original.save_pretrained(tmp_dir)
                new = cls.from_pretrained(tmp_dir)
                self.assertEqual(new.encode("生活的真谛是"), [1, 4, 4, 4, 4, 4, 4, 2])
transformers/tests/models/roformer/test_tokenization_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_tokenization_roformer.py", "repo_id": "transformers", "token_count": 1417 }
386
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SegFormer model. """ import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class SegformerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class SegformerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[1, 1, 1, 1], sr_ratios=[8, 4, 2, 1], hidden_sizes=[8, 8, 16, 16], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 1, 2, 2], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SegformerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = SegformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_image_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SegformerForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss, 0.0) def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels): config.num_labels = 1 model = SegformerForSemanticSegmentation(config=config) model.to(torch_device) model.eval() labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device) result = model(pixel_values, labels=labels) self.parent.assertGreater(result.loss, 0.0) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = SegformerModelTester(self) self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_binary_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) @unittest.skip("SegFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) # verify the last attentions (last block, last layer) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = 
self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SegformerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class SegformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation_ade(self): # only resize + normalize image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_segmentation_city(self): # only resize + normalize image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(torch_device) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1)) @slow def test_post_processing_semantic_segmentation(self): # only resize + normalize image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = 
image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((128, 128)) self.assertEqual(segmentation[0].shape, expected_shape)
transformers/tests/models/segformer/test_modeling_segformer.py/0
{ "file_path": "transformers/tests/models/segformer/test_modeling_segformer.py", "repo_id": "transformers", "token_count": 7800 }
387
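The SegFormer integration tests above run `SegformerForSemanticSegmentation` end to end on an ADE20k checkpoint. As a minimal, hedged sketch of that same inference flow outside the test harness (the checkpoint name comes from the tests; the local image path is an assumption):

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

# Checkpoint used by the integration tests above.
checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512"
image_processor = SegformerImageProcessor.from_pretrained(checkpoint)
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint)
model.eval()

image = Image.open("cats.png")  # hypothetical local image
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Logits come out at 1/4 of the (resized) input resolution, e.g. 128x128 for a 512x512 input;
# post-processing upsamples them to the requested target size and takes the argmax per pixel.
segmentation = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (height, width) map of predicted class indices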
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor from transformers.image_transforms import get_image_size class Swin2SRImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size def prepare_image_processor_dict(self): return { "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, "pad_size": self.pad_size, } def expected_output_image_shape(self, images): img = images[0] if isinstance(img, Image.Image): input_width, input_height = img.size else: input_height, input_width = img.shape[-2:] pad_height = (input_height // self.pad_size + 1) * self.pad_size - input_height pad_width = (input_width // self.pad_size + 1) * self.pad_size - input_width return self.num_channels, input_height + pad_height, input_width + pad_width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class Swin2SRImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Swin2SRImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = Swin2SRImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_rescale")) self.assertTrue(hasattr(image_processor, "rescale_factor")) self.assertTrue(hasattr(image_processor, "do_pad")) self.assertTrue(hasattr(image_processor, "pad_size")) def calculate_expected_size(self, image): old_height, old_width = get_image_size(image) size = self.image_processor_tester.pad_size pad_height = (old_height // size + 1) * size - old_height pad_width = (old_width // size + 1) * size - old_width return old_height + pad_height, old_width + pad_width # Swin2SRImageProcessor does not support 
batched input def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Swin2SRImageProcessor does not support batched input def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Swin2SRImageProcessor does not support batched input def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing( image_inputs[0], return_tensors="pt", input_data_format="channels_first" ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) self.image_processor_tester.num_channels = 3 # Swin2SRImageProcessor does not support batched input def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
transformers/tests/models/swin2sr/test_image_processing_swin2sr.py/0
{ "file_path": "transformers/tests/models/swin2sr/test_image_processing_swin2sr.py", "repo_id": "transformers", "token_count": 2903 }
388
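Both `expected_output_image_shape` and `calculate_expected_size` in the Swin2SR test above pad each spatial dimension up to the next multiple of `pad_size`. A small hedged sketch of that arithmetic on its own (the helper name and sample values are illustrative):

def padded_size(height, width, pad_size=8):
    # Same formula as the test helpers above: "// pad_size + 1" always adds at
    # least one full block, so an input that is already a multiple of pad_size
    # still grows by pad_size pixels on that side.
    pad_height = (height // pad_size + 1) * pad_size - height
    pad_width = (width // pad_size + 1) * pad_size - width
    return height + pad_height, width + pad_width

print(padded_size(30, 400))  # (32, 408) for the tester's min/max resolutions
print(padded_size(16, 16))   # (24, 24): exact multiples are still padded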
# coding=utf-8 # Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch TVP model. """ import unittest from transformers import ResNetConfig, TvpConfig from transformers.testing_utils import require_torch, require_vision, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import TvpForVideoGrounding, TvpModel if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor # Copied from test.models.videomae.test_modeling_videomae.VideoMAEModelTester with VideoMAE->TVP class TVPModelTester: def __init__( self, parent, batch_size=1, seq_length=2, alpha=1.0, beta=0.1, visual_prompter_type="framepad", visual_prompter_apply="replace", num_frames=2, max_img_size=448, visual_prompt_size=96, vocab_size=100, hidden_size=32, intermediate_size=32, num_hidden_layers=2, num_attention_heads=4, max_position_embeddings=30, max_grid_col_position_embeddings=30, max_grid_row_position_embeddings=30, hidden_dropout_prob=0.1, hidden_act="gelu", layer_norm_eps=1e-12, initializer_range=0.02, pad_token_id=0, type_vocab_size=2, attention_probs_dropout_prob=0.1, ): self.parent = parent self.batch_size = batch_size self.input_id_length = seq_length self.seq_length = seq_length + 10 + 784 # include text prompt length and visual input length self.alpha = alpha self.beta = beta self.visual_prompter_type = visual_prompter_type self.visual_prompter_apply = visual_prompter_apply self.num_frames = num_frames self.max_img_size = max_img_size self.visual_prompt_size = visual_prompt_size self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_grid_col_position_embeddings = max_grid_col_position_embeddings self.max_grid_row_position_embeddings = max_grid_row_position_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.pad_token_id = pad_token_id self.type_vocab_size = type_vocab_size self.is_training = False self.num_channels = 3 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.input_id_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.input_id_length]) pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.max_img_size, self.max_img_size] ) config = self.get_config() return (config, input_ids, pixel_values, attention_mask) def 
get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=64, hidden_sizes=[64, 128], depths=[2, 2], hidden_act="relu", out_features=["stage2"], out_indices=[2], ) return TvpConfig( backbone_config=resnet_config, backbone=None, alpha=self.alpha, beta=self.beta, visual_prompter_type=self.visual_prompter_type, visual_prompter_apply=self.visual_prompter_apply, num_frames=self.num_frames, max_img_size=self.max_img_size, visual_prompt_size=self.visual_prompt_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_grid_col_position_embeddings=self.max_grid_col_position_embeddings, max_grid_row_position_embeddings=self.max_grid_row_position_embeddings, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, type_vocab_size=self.type_vocab_size, ) def create_and_check_model(self, config, input_ids, pixel_values, attention_mask): model = TvpModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, pixel_values, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "pixel_values": pixel_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as TVP does not use inputs_embeds. The seq_length in TVP contains textual and visual inputs, and the prompt. """ all_model_classes = (TvpModel, TvpForVideoGrounding) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TvpModel, "temporal-video-grounding": TvpForVideoGrounding} if is_torch_available() else {} ) # TODO: Enable this once this model gets more usage test_torchscript = False def setUp(self): self.model_tester = TVPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="TVP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TVPModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initialization is different for TVP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # params are randomly initialized.
self.assertAlmostEqual( param.data.mean().item(), 0.0, delta=1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_torch class TvpModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return TvpImageProcessor.from_pretrained("Jiqing/tiny-random-tvp") if is_vision_available() else None def test_inference_no_head(self): model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) encoding.to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 796, 128)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[-0.4902, -0.4121, -1.7872], [-0.2184, 2.1211, -0.9371], [0.1180, 0.5003, -0.1727]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_with_head(self): model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) encoding.to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 2)) assert outputs.logits.shape == expected_shape expected_slice = torch.tensor([[0.5061, 0.4988]]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits, expected_slice, atol=1e-4))
transformers/tests/models/tvp/test_modeling_tvp.py/0
{ "file_path": "transformers/tests/models/tvp/test_modeling_tvp.py", "repo_id": "transformers", "token_count": 4545 }
389
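As a hedged companion to the TVP integration tests above, reusing the same tiny checkpoint and dummy text ids (the image path is an assumption), a bare-bones forward pass could look like:

import torch
from PIL import Image
from transformers import TvpImageProcessor, TvpModel

checkpoint = "Jiqing/tiny-random-tvp"  # tiny checkpoint used by the tests above
image_processor = TvpImageProcessor.from_pretrained(checkpoint)
model = TvpModel.from_pretrained(checkpoint)
model.eval()

image = Image.open("frame.png")  # hypothetical single video frame
encoding = image_processor(images=image, return_tensors="pt")
# Two dummy text tokens, exactly as in test_inference_no_head above.
encoding.update({"input_ids": torch.tensor([[1, 2]]), "attention_mask": torch.tensor([[1, 1]])})

with torch.no_grad():
    outputs = model(**encoding)

# Sequence length = text tokens + visual prompt + flattened visual features (796 in the test).
print(outputs.last_hidden_state.shape)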
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class VisionTextDualEncoderProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: skip self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) processor.save_pretrained(self.tmpdirname) processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor.image_processor, ViTImageProcessor) def test_save_load_pretrained_additional_features(self): processor = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, ViTImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with self.assertRaises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) 
self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py/0
{ "file_path": "transformers/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 2796 }
390
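The processor tests above pair a BERT tokenizer with a ViT image processor built from local fixtures. A hedged sketch of the same wiring against public checkpoints (both checkpoint names are assumptions; any compatible tokenizer/image-processor pair should behave the same way):

import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")  # assumed checkpoint
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")  # assumed checkpoint
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")

# Same keys test_processor asserts on: text fields from the tokenizer, pixel_values from the image processor.
print(list(inputs.keys()))  # ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values']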
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViTDet model. """ import unittest from transformers import VitDetConfig from transformers.testing_utils import is_flaky, require_torch, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import VitDetBackbone, VitDetModel class VitDetModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.num_patches_one_direction = self.image_size // self.patch_size self.seq_length = (self.image_size // self.patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VitDetConfig( image_size=self.image_size, pretrain_image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = VitDetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction), ) def create_and_check_backbone(self, config, 
pixel_values, labels): model = VitDetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction], ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, [config.hidden_size]) # verify backbone works with out_features=None config.out_features = None model = VitDetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction], ) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_size]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitDetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VitDet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VitDetModel, VitDetBackbone) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": VitDetModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitDetModelTester(self) self.config_tester = ConfigTester(self, config_class=VitDetConfig, has_text_modality=False, hidden_size=37) @is_flaky(max_attempts=3, description="`torch.nn.init.trunc_normal_` is flaky.") def test_initialization(self): super().test_initialization() # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): super().test_cpu_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload_bin(self): super().test_disk_offload() @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload_safetensors(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VitDet does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = self.model_tester.num_hidden_layers self.assertEqual(len(hidden_states), expected_num_stages + 1) # VitDet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [ self.model_tester.num_patches_one_direction, self.model_tester.num_patches_one_direction, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # overwrite since VitDet only supports retaining gradients of hidden states def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) @unittest.skip(reason="VitDet does not support feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="VitDet does not have standalone checkpoints since it is used as a backbone in other models") def test_model_from_pretrained(self): pass @require_torch class VitDetBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (VitDetBackbone,) if is_torch_available() else () config_class = VitDetConfig has_attentions = False def setUp(self): self.model_tester = VitDetModelTester(self)
transformers/tests/models/vitdet/test_modeling_vitdet.py/0
{ "file_path": "transformers/tests/models/vitdet/test_modeling_vitdet.py", "repo_id": "transformers", "token_count": 4686 }
391
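`create_and_check_backbone` above checks the feature maps and channel list that `VitDetBackbone` exposes. A hedged, config-only sketch of the same behaviour with the tester's small hyper-parameters (no pretrained weights involved):

import torch
from transformers import VitDetBackbone, VitDetConfig

config = VitDetConfig(
    image_size=30,
    pretrain_image_size=30,
    patch_size=2,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
)
model = VitDetBackbone(config)
model.eval()

pixel_values = torch.rand(1, 3, 30, 30)
with torch.no_grad():
    outputs = model(pixel_values)

# With out_features left at its default, a single feature map of shape
# (batch, hidden_size, image_size // patch_size, image_size // patch_size) is returned.
print([tuple(fm.shape) for fm in outputs.feature_maps])  # [(1, 32, 15, 15)]
print(model.channels)  # [32]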
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch YOLOS model. """ import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class YolosModelTester: def __init__( self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.n_targets = n_targets self.num_detection_tokens = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size) self.expected_seq_len = num_patches + 1 + self.num_detection_tokens def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, labels def get_config(self): return YolosConfig( image_size=self.image_size, patch_size=self.patch_size, 
num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = YolosModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) ) def create_and_check_for_object_detection(self, config, pixel_values, labels): model = YolosForObjectDetection(config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) result = model(pixel_values=pixel_values, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as YOLOS does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False # special case for head model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "YolosForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = YolosModelTester(self) self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # YOLOS does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in YOLOS, the seq_len is different seq_len = self.model_tester.expected_seq_len for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): 
def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # YOLOS has a different seq_length seq_length = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_object_detection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = YolosModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class YolosModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None @slow def test_inference_object_detection_head(self): model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(inputs.pixel_values) # verify outputs expected_shape = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice_logits = torch.tensor( [[-23.7219, -10.3165, -14.9083], [-41.5429, -15.2403, -24.1478], [-29.3909, -12.7173, -19.4650]], device=torch_device, ) expected_slice_boxes = torch.tensor( [[0.2536, 0.5449, 0.4643], [0.2037, 0.7735, 0.3672], [0.7692, 0.4056, 0.4549]], device=torch_device ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.9991, 0.9801, 0.9978, 0.9875, 0.9848]).to(torch_device) expected_labels = [75, 75, 17, 63, 17] expected_slice_boxes = torch.tensor([331.8438, 80.5440, 369.9546, 188.0579]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
transformers/tests/models/yolos/test_modeling_yolos.py/0
{ "file_path": "transformers/tests/models/yolos/test_modeling_yolos.py", "repo_id": "transformers", "token_count": 6777 }
392
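Mirroring test_inference_object_detection_head above, a hedged sketch of YOLOS detection inference plus post-processing (the checkpoint name comes from the test; the image path is an assumption):

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

checkpoint = "hustvl/yolos-small"
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = YolosForObjectDetection.from_pretrained(checkpoint)
model.eval()

image = Image.open("cats.png")  # hypothetical local image
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# One row per detection token: class logits (num_labels + 1 for "no object") and normalized boxes;
# post-processing filters by score and rescales boxes to the original image size.
results = image_processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])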
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): image_classifier = ImageClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # Accepts URL + PIL.Image + lists outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() # Assert that the pipeline can be initialized with a feature extractor that is not in any mapping image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @slow @require_torch def test_perceiver(self): # Perceiver is not tested by `run_pipeline_test` properly. # That is because the type of feature_extractor and model preprocessor need to be kept # in sync, which is not the case in the current design image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], ) @slow @require_torch def test_multilabel_classification(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) image_classifier.model.config.problem_type = "multi_label_classification" outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ) outputs = 
image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ], ) @slow @require_torch def test_function_to_apply(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier( "http://images.cocodataset.org/val2017/000000039769.jpg", function_to_apply="sigmoid", ) self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], )
transformers/tests/pipelines/test_pipelines_image_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_classification.py", "repo_id": "transformers", "token_count": 4939 }
393
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class VideoClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): example_video_filepath = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def run_pipeline_test(self, video_classifier, examples): for example in examples: outputs = video_classifier(example) self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" small_feature_extractor = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) video_classifier = pipeline( "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 ) video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset") outputs = video_classifier(video_file_path, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ) outputs = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ], ) @require_tf def test_small_model_tf(self): pass
transformers/tests/pipelines/test_pipelines_video_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_video_classification.py", "repo_id": "transformers", "token_count": 1516 }
394
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(2,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
transformers/tests/sagemaker/test_multi_node_data_parallel.py/0
{ "file_path": "transformers/tests/sagemaker/test_multi_node_data_parallel.py", "repo_id": "transformers", "token_count": 1917 }
395
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import inspect import json import os import random import tempfile import unittest import unittest.mock as mock from huggingface_hub import HfFolder, Repository, delete_repo, snapshot_download from requests.exceptions import HTTPError from transformers import is_tf_available, is_torch_available from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import ( # noqa: F401 TOKEN, USER, CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, is_staging_test, require_safetensors, require_tf, require_torch, slow, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging, ) logger = logging.get_logger(__name__) if is_tf_available(): import h5py import numpy as np import tensorflow as tf from transformers import ( BertConfig, PreTrainedModel, PushToHubCallback, RagRetriever, TFBertForMaskedLM, TFBertForSequenceClassification, TFBertModel, TFPreTrainedModel, TFRagModel, ) from transformers.modeling_tf_utils import keras, tf_shard_checkpoint, unpack_inputs from transformers.tf_utils import stable_softmax tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) if is_torch_available(): from transformers import BertModel @require_tf class TFModelUtilsTest(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() # tests whether the unpack_inputs function behaves as expected def test_unpack_inputs(self): class DummyModel: def __init__(self): config_kwargs = {"output_attentions": False, "output_hidden_states": False, "return_dict": False} self.config = PretrainedConfig(**config_kwargs) self.main_input_name = "input_ids" @unpack_inputs def call( self, input_ids=None, past_key_values=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return input_ids, past_key_values, output_attentions, output_hidden_states, return_dict @unpack_inputs def foo(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None): return pixel_values, output_attentions, output_hidden_states, return_dict dummy_model = DummyModel() input_ids = tf.constant([0, 1, 2, 3], dtype=tf.int32) past_key_values = tf.constant([4, 5, 6, 7], dtype=tf.int32) pixel_values = tf.constant([8, 9, 10, 11], dtype=tf.int32) # test case 1: Pass inputs as keyword arguments; Booleans are inherited from the config. output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) # test case 2: Same as above, but with positional arguments. output = dummy_model.call(input_ids, past_key_values) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) # test case 3: We can also pack everything in the first input. output = dummy_model.call(input_ids={"input_ids": input_ids, "past_key_values": past_key_values}) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertFalse(output[4]) # test case 4: Explicit boolean arguments should override the config. output = dummy_model.call( input_ids=input_ids, past_key_values=past_key_values, output_attentions=False, return_dict=True ) tf.debugging.assert_equal(output[0], input_ids) tf.debugging.assert_equal(output[1], past_key_values) self.assertFalse(output[2]) self.assertFalse(output[3]) self.assertTrue(output[4]) # test case 5: Unexpected arguments should raise an exception. with self.assertRaises(ValueError): output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values, foo="bar") # test case 6: the decorator is independent from `main_input_name` -- it treats the first argument of the # decorated function as its main input. 
output = dummy_model.foo(pixel_values=pixel_values) tf.debugging.assert_equal(output[0], pixel_values) self.assertFalse(output[1]) self.assertFalse(output[2]) self.assertFalse(output[3]) # Tests whether the stable softmax is stable on CPU, with and without XLA def test_xla_stable_softmax(self): large_penalty = -1e9 n_tokens = 10 batch_size = 8 def masked_softmax(x, boolean_mask): numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty masked_x = x + numerical_mask return stable_softmax(masked_x) xla_masked_softmax = tf.function(masked_softmax, jit_compile=True) xla_stable_softmax = tf.function(stable_softmax, jit_compile=True) x = tf.random.normal((batch_size, n_tokens)) # Same outcome regardless of the boolean mask here masked_tokens = random.randint(0, n_tokens) boolean_mask = tf.convert_to_tensor([[1] * (n_tokens - masked_tokens) + [0] * masked_tokens], dtype=tf.int32) # We can randomly mask a random numerical input OUTSIDE XLA numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty masked_x = x + numerical_mask xla_out = xla_stable_softmax(masked_x) out = stable_softmax(masked_x) assert tf.experimental.numpy.allclose(xla_out, out) # The stable softmax has the same output as the original softmax unstable_out = tf.nn.softmax(masked_x) assert tf.experimental.numpy.allclose(unstable_out, out) # We can randomly mask a random numerical input INSIDE XLA xla_out = xla_masked_softmax(x, boolean_mask) out = masked_softmax(x, boolean_mask) assert tf.experimental.numpy.allclose(xla_out, out) def test_checkpoint_sharding_from_hub(self): model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_sharded_checkpoint_with_prefix(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", load_weight_prefix="a/b") sharded_model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded", load_weight_prefix="a/b") for p1, p2 in zip(model.weights, sharded_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) self.assertTrue(p1.name.startswith("a/b/")) self.assertTrue(p2.name.startswith("a/b/")) def test_sharded_checkpoint_transfer(self): # If this doesn't throw an error then the test passes TFBertForSequenceClassification.from_pretrained("ArthurZ/tiny-random-bert-sharded") @is_pt_tf_cross_test def test_checkpoint_sharding_local_from_pt(self): with tempfile.TemporaryDirectory() as tmp_dir: _ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-bert-sharded") model = TFBertModel.from_pretrained(tmp_dir, from_pt=True) # the model above is the same as the model below, just a sharded pytorch version. 
ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) @is_pt_tf_cross_test def test_checkpoint_loading_with_prefix_from_pt(self): model = TFBertModel.from_pretrained( "hf-internal-testing/tiny-random-bert", from_pt=True, load_weight_prefix="a/b" ) ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) for p1, p2 in zip(model.weights, ref_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) self.assertTrue(p1.name.startswith("a/b/")) @is_pt_tf_cross_test def test_checkpoint_sharding_hub_from_pt(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) # the model above is the same as the model below, just a sharded pytorch version. ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) def test_shard_checkpoint(self): # This is the model we will use, total size 340,000 bytes. model = keras.Sequential( [ keras.layers.Dense(200, use_bias=False), # size 80,000 keras.layers.Dense(200, use_bias=False), # size 160,000 keras.layers.Dense(100, use_bias=False), # size 80,000 keras.layers.Dense(50, use_bias=False), # size 20,000 ] ) inputs = tf.zeros((1, 100), dtype=tf.float32) model(inputs) weights = model.weights weights_dict = {w.name: w for w in weights} with self.subTest("No shard when max size is bigger than model size"): shards, index = tf_shard_checkpoint(weights) self.assertIsNone(index) self.assertDictEqual(shards, {TF2_WEIGHTS_NAME: weights}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = tf_shard_checkpoint(weights, max_shard_size="300kB") # Split is first two layers then last two. self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "dense/kernel:0": "tf_model-00001-of-00002.h5", "dense_1/kernel:0": "tf_model-00001-of-00002.h5", "dense_2/kernel:0": "tf_model-00002-of-00002.h5", "dense_3/kernel:0": "tf_model-00002-of-00002.h5", }, }, ) shard1 = [weights_dict["dense/kernel:0"], weights_dict["dense_1/kernel:0"]] shard2 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]] self.assertDictEqual(shards, {"tf_model-00001-of-00002.h5": shard1, "tf_model-00002-of-00002.h5": shard2}) with self.subTest("Test sharding with weights bigger than max size"): shards, index = tf_shard_checkpoint(weights, max_shard_size="100kB") # Split is first layer, second layer then last 2. 
self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "dense/kernel:0": "tf_model-00001-of-00003.h5", "dense_1/kernel:0": "tf_model-00002-of-00003.h5", "dense_2/kernel:0": "tf_model-00003-of-00003.h5", "dense_3/kernel:0": "tf_model-00003-of-00003.h5", }, }, ) shard1 = [weights_dict["dense/kernel:0"]] shard2 = [weights_dict["dense_1/kernel:0"]] shard3 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]] self.assertDictEqual( shards, { "tf_model-00001-of-00003.h5": shard1, "tf_model-00002-of-00003.h5": shard2, "tf_model-00003-of-00003.h5": shard3, }, ) @slow def test_special_layer_name_sharding(self): retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) model = TFRagModel.from_pretrained("facebook/rag-token-nq", retriever=retriever) with tempfile.TemporaryDirectory() as tmp_dir: for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) ref_model = TFRagModel.from_pretrained(tmp_dir, retriever=retriever) for p1, p2 in zip(model.weights, ref_model.weights): assert np.allclose(p1.numpy(), p2.numpy()) @require_safetensors def test_checkpoint_sharding_local(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".h5"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with h5py.File(shard_file, "r") as state_file: self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".h5")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = TFBertModel.from_pretrained(tmp_dir) model.build_in_name_scope() new_model.build_in_name_scope() for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) def test_safetensors_checkpoint_sharding_local(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=True) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".h5"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME))) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".safetensors")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = TFBertModel.from_pretrained(tmp_dir) model.build_in_name_scope() new_model.build_in_name_scope() for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @slow def test_save_pretrained_signatures(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Short custom TF signature function. # `input_signature` is specific to BERT. @tf.function( input_signature=[ [ tf.TensorSpec([None, None], tf.int32, name="input_ids"), tf.TensorSpec([None, None], tf.int32, name="token_type_ids"), tf.TensorSpec([None, None], tf.int32, name="attention_mask"), ] ] ) def serving_fn(input): return model(input) # Using default signature (default behavior) overrides 'serving_default' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, saved_model=True, signatures=None) model_loaded = keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("serving_default" in list(model_loaded.signatures.keys())) # Providing custom signature function with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, saved_model=True, signatures={"custom_signature": serving_fn}) model_loaded = keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("custom_signature" in list(model_loaded.signatures.keys())) # Providing multiple custom signature function with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, saved_model=True, signatures={"custom_signature_1": serving_fn, "custom_signature_2": serving_fn}, ) model_loaded = keras.models.load_model(f"{tmp_dir}/saved_model/1") self.assertTrue("custom_signature_1" in list(model_loaded.signatures.keys())) self.assertTrue("custom_signature_2" in list(model_loaded.signatures.keys())) @require_safetensors def test_safetensors_save_and_load(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No tf_model.h5 file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in 
zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_sharded_save_and_load(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="150kB") # No tf weights or index file, only a safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_tf_cross_test def test_safetensors_save_and_load_pt_to_tf(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir, safe_serialization=True) # Check we have a model.safetensors file self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_tf_cross_test def test_sharded_safetensors_save_and_load_pt_to_tf(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="150kB") # Check we have a safetensors shard index file self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) new_model = TFBertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub(self): tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Can load from the TF-formatted checkpoint safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors-tf") # Check models are equal for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) # Can load from the PyTorch-formatted checkpoint safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") # Check models are equal for p1, p2 in zip(safetensors_model.weights, tf_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_tf_from_tf(self): model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = TFBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors @is_pt_tf_cross_test def test_safetensors_tf_from_torch(self): hub_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") model = 
BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = TFBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_local(self): with tempfile.TemporaryDirectory() as tmp_dir: path = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded", cache_dir=tmp_dir) # This should not raise even if there are two types of sharded weights TFBertModel.from_pretrained(path) @require_safetensors def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_hub(self): # Confirm that we can correctly load the safetensors weights from a sharded hub repo even when TF weights present TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded", use_safetensors=True) # Confirm that we can access the TF weights too TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded", use_safetensors=False) @require_safetensors def test_safetensors_load_from_local(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-tf-only", cache_dir=tmp) tf_model = TFBertModel.from_pretrained(location) with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-only", cache_dir=tmp) safetensors_model = TFBertModel.from_pretrained(location) for p1, p2 in zip(tf_model.weights, safetensors_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub_from_safetensors_pt(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub. saved in the "pt" format. """ tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-h5") # Can load from the PyTorch-formatted checkpoint safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") for p1, p2 in zip(tf_model.weights, safetensors_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_local_from_safetensors_pt(self): """ This test checks that we can load safetensors from a local checkpoint that only has those saved in the "pt" format. 
""" with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-h5", cache_dir=tmp) tf_model = TFBertModel.from_pretrained(location) # Can load from the PyTorch-formatted checkpoint with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) safetensors_model = TFBertModel.from_pretrained(location) for p1, p2 in zip(tf_model.weights, safetensors_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @require_safetensors def test_safetensors_load_from_hub_h5_before_safetensors(self): """ This test checks that we'll first download h5 weights before safetensors The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch """ TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") @require_safetensors def test_safetensors_load_from_local_h5_before_safetensors(self): """ This test checks that we'll first download h5 weights before safetensors The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) TFBertModel.from_pretrained(location) @require_tf @is_staging_test class TFModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-tf") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-model-tf-callback") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-tf-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertModel(config) # Make sure model is properly initialized model.build_in_name_scope() logging.set_verbosity_info() logger = logging.get_logger("transformers.utils.hub") with CaptureLogger(logger) as cl: model.push_to_hub("test-model-tf", token=self._token) logging.set_verbosity_warning() # Check the model card was created and uploaded. 
self.assertIn("Uploading the following files to __DUMMY_TRANSFORMERS_USER__/test-model-tf", cl.out) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) # Reset repo delete_repo(token=self._token, repo_id="test-model-tf") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-tf", push_to_hub=True, token=self._token) new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) @is_pt_tf_cross_test def test_push_to_hub_callback(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertForMaskedLM(config) model.compile() with tempfile.TemporaryDirectory() as tmp_dir: push_to_hub_callback = PushToHubCallback( output_dir=tmp_dir, hub_model_id="test-model-tf-callback", hub_token=self._token, ) model.fit(model.dummy_inputs, model.dummy_inputs, epochs=1, callbacks=[push_to_hub_callback]) new_model = TFBertForMaskedLM.from_pretrained(f"{USER}/test-model-tf-callback") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) tf_push_to_hub_params = dict(inspect.signature(TFPreTrainedModel.push_to_hub).parameters) tf_push_to_hub_params.pop("base_model_card_args") pt_push_to_hub_params = dict(inspect.signature(PreTrainedModel.push_to_hub).parameters) pt_push_to_hub_params.pop("deprecated_kwargs") self.assertDictEaual(tf_push_to_hub_params, pt_push_to_hub_params) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = TFBertModel(config) # Make sure model is properly initialized model.build_in_name_scope() model.push_to_hub("valid_org/test-model-tf-org", token=self._token) new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-tf-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-tf-org") new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org") models_equal = True for p1, p2 in zip(model.weights, new_model.weights): if not tf.math.reduce_all(p1 == p2): models_equal = False break self.assertTrue(models_equal)
transformers/tests/test_modeling_tf_utils.py/0
{ "file_path": "transformers/tests/test_modeling_tf_utils.py", "repo_id": "transformers", "token_count": 16604 }
396
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate # Fake function we will use as tool def add_two(x): return x + 2 class PythonInterpreterTester(unittest.TestCase): def test_evaluate_assign(self): code = "x = 3" state = {} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3}) code = "x = y" state = {"y": 5} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(state, {"x": 5, "y": 5}) def test_evaluate_call(self): code = "y = add_two(x)" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5}) # Won't work without the tool with CaptureStdout() as out: result = evaluate(code, {}, state=state) assert result is None assert "tried to execute add_two" in out.out def test_evaluate_constant(self): code = "x = 3" state = {} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3}) def test_evaluate_dict(self): code = "test_dict = {'x': x, 'y': add_two(x)}" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) self.assertDictEqual(result, {"x": 3, "y": 5}) self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}}) def test_evaluate_expression(self): code = "x = 3\ny = 5" state = {} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5}) def test_evaluate_f_string(self): code = "text = f'This is x: {x}.'" state = {"x": 3} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."}) def test_evaluate_if(self): code = "if x <= 3:\n y = 2\nelse:\n y = 5" state = {"x": 3} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(state, {"x": 3, "y": 2}) state = {"x": 8} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(state, {"x": 8, "y": 5}) def test_evaluate_list(self): code = "test_list = [x, add_two(x)]" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) self.assertListEqual(result, [3, 5]) self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]}) def test_evaluate_name(self): code = "y = x" state = {"x": 3} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3, "y": 3}) def test_evaluate_subscript(self): code = "test_list = [x, add_two(x)]\ntest_list[1]" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]}) code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}}) def test_evaluate_for(self): code = "x = 0\nfor i in range(3):\n x = i" state = {} result = evaluate(code, {"range": range}, state=state) assert result == 2 self.assertDictEqual(state, {"x": 2, "i": 2})
transformers/tests/tools/test_python_interpreter.py/0
{ "file_path": "transformers/tests/tools/test_python_interpreter.py", "repo_id": "transformers", "token_count": 2013 }
397
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class HfArgumentParserTest(unittest.TestCase): def test_set_level(self): logger = logging.get_logger() # the current default level is logging.WARNING level_origin = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) # restore to the original level logging.set_verbosity(level_origin) def test_integration(self): level_origin = logging.get_verbosity() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, "") # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # restore to the original level logging.set_verbosity(level_origin) @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _ = logging.get_logger("transformers.models.bart.tokenization_bart") env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) env_level = logging.log_levels[env_level_str] current_level = logging.get_verbosity() self.assertEqual( env_level, current_level, f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", ) # restore to the original level os.environ["TRANSFORMERS_VERBOSITY"] = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error") def test_env_invalid_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() logger = logging.logging.getLogger() with CaptureLogger(logger) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart") 
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out) # no need to restore as nothing was changed def test_advisory_warnings(self): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"): # nothing should be logged as env var disables this method with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, "") with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, msg + "\n") def test_set_progress_bar_enabled(): disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
transformers/tests/utils/test_logging.py/0
{ "file_path": "transformers/tests/utils/test_logging.py", "repo_id": "transformers", "token_count": 2007 }
398
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is responsible for making sure the dummies in utils/dummies_xxx.py are up to date with the main init. Why dummies? This is to make sure that a user can always import all objects from `transformers`, even if they don't have the necessary extra libs installed. Those objects will then raise helpful error message whenever the user tries to access one of their methods. Usage (from the root of the repo): Check that the dummy files are up to date (used in `make repo-consistency`): ```bash python utils/check_dummies.py ``` Update the dummy files if needed (used in `make fix-copies`): ```bash python utils/check_dummies.py --fix_and_overwrite ``` """ import argparse import os import re from typing import Dict, List, Optional # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py PATH_TO_TRANSFORMERS = "src/transformers" # Matches is_xxx_available() _re_backend = re.compile(r"is\_([a-z_]*)_available()") # Matches from xxx import bla _re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Matches if not is_xxx_available() _re_test_backend = re.compile(r"^\s+if\s+not\s+\(?is\_[a-z_]*\_available\(\)") # Template for the dummy objects. DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def find_backend(line: str) -> Optional[str]: """ Find one (or multiple) backend in a code line of the init. Args: line (`str`): A code line in an init file. Returns: Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line contains `if is_xxx_available() and `is_yyy_available()`) returns all backends joined on `_and_` (so `xxx_and_yyy` for instance). """ if _re_test_backend.search(line) is None: return None backends = [b[0] for b in _re_backend.findall(line)] backends.sort() return "_and_".join(backends) def read_init() -> Dict[str, List[str]]: """ Read the init and extract backend-specific objects. Returns: Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend. """ with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Get to the point we do the actual imports for type checking line_index = 0 while not lines[line_index].startswith("if TYPE_CHECKING"): line_index += 1 backend_specific_objects = {} # Go through the end of the file while line_index < len(lines): # If the line is an if is_backend_available, we grab all objects associated. 
backend = find_backend(lines[line_index]) if backend is not None: while not lines[line_index].startswith(" else:"): line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): line = lines[line_index] single_line_import_search = _re_single_line_import.search(line) if single_line_import_search is not None: # Single-line imports objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 12): # Multiple-line imports (with 3 indent level) objects.append(line[12:-2]) line_index += 1 backend_specific_objects[backend] = objects else: line_index += 1 return backend_specific_objects def create_dummy_object(name: str, backend_name: str) -> str: """ Create the code for a dummy object. Args: name (`str`): The name of the object. backend_name (`str`): The name of the backend required for that object. Returns: `str`: The code of the dummy object. """ if name.isupper(): return DUMMY_CONSTANT.format(name) elif name.islower(): return DUMMY_FUNCTION.format(name, backend_name) else: return DUMMY_CLASS.format(name, backend_name) def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]] = None) -> Dict[str, str]: """ Create the content of the dummy files. Args: backend_specific_objects (`Dict[str, List[str]]`, *optional*): The mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling `read_init()`. Returns: `Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file. """ if backend_specific_objects is None: backend_specific_objects = read_init() dummy_files = {} for backend, objects in backend_specific_objects.items(): backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]" dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects]) dummy_files[backend] = dummy_file return dummy_files def check_dummies(overwrite: bool = False): """ Check if the dummy files are up to date and maybe `overwrite` with the right content. Args: overwrite (`bool`, *optional*, default to `False`): Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date when `overwrite=False`. """ dummy_files = create_dummy_files() # For special correspondence backend name to shortcut as used in utils/dummy_xxx_objects.py short_names = {"torch": "pt"} # Locate actual dummy modules and read their content. path = os.path.join(PATH_TO_TRANSFORMERS, "utils") dummy_file_paths = { backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py") for backend in dummy_files.keys() } actual_dummies = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(file_path): with open(file_path, "r", encoding="utf-8", newline="\n") as f: actual_dummies[backend] = f.read() else: actual_dummies[backend] = "" # Compare actual with what they should be. for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main " "__init__ has new objects." 
) with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f: f.write(dummy_files[backend]) else: raise ValueError( "The main __init__ has objects that are not present in " f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` " "to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_dummies(args.fix_and_overwrite)
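To make the templates above concrete, here is a minimal sketch of what `create_dummy_object` emits for a class name versus a function name. The object names are purely illustrative, and the snippet assumes `utils/` is on the Python path so `check_dummies` can be imported directly.

# Minimal sketch (illustrative names, run with utils/ on sys.path).
from check_dummies import create_dummy_object

print(create_dummy_object("BertModel", '["torch"]'))
# class BertModel(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])

print(create_dummy_object("load_tf_weights_in_bert", '["torch"]'))
# def load_tf_weights_in_bert(*args, **kwargs):
#     requires_backends(load_tf_weights_in_bert, ["torch"])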
transformers/utils/check_dummies.py/0
{ "file_path": "transformers/utils/check_dummies.py", "repo_id": "transformers", "token_count": 3337 }
399
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def get_daily_ci_runs(token, num_runs=7): """Get the workflow runs of the scheduled (daily) CI. This only selects the runs triggered by the `schedule` event on the `main` branch. """ headers = None if token is not None: headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"} # The id of a workflow (not of a workflow run) workflow_id = "636036" url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}" result = requests.get(url, headers=headers).json() return result["workflow_runs"] def get_last_daily_ci_runs(token): """Get the last completed workflow run id of the scheduled (daily) CI.""" workflow_runs = get_daily_ci_runs(token) workflow_run_id = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": workflow_run_id = workflow_run["id"] break return workflow_run_id def get_last_daily_ci_artifacts(artifact_names, output_dir, token): """Get the artifacts of last completed workflow run id of the scheduled (daily) CI.""" workflow_run_id = get_last_daily_ci_runs(token) if workflow_run_id is not None: artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token) for artifact_name in artifact_names: if artifact_name in artifacts_links: artifact_url = artifacts_links[artifact_name] download_artifact( artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token ) def get_last_daily_ci_reports(artifact_names, output_dir, token): """Get the artifacts' content of the last completed workflow run id of the scheduled (daily) CI.""" get_last_daily_ci_artifacts(artifact_names, output_dir, token) results = {} for artifact_name in artifact_names: artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip") if os.path.isfile(artifact_zip_path): results[artifact_name] = {} with zipfile.ZipFile(artifact_zip_path) as z: for filename in z.namelist(): if not os.path.isdir(filename): # read the file with z.open(filename) as f: results[artifact_name][filename] = f.read().decode("UTF-8") return results
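A hedged usage sketch of the helpers above; the artifact name, output directory, and token environment variable are assumptions chosen for illustration rather than values taken from the actual CI configuration.

```python
# Hypothetical usage sketch (not part of the module above): fetch and read the
# reports from the last completed daily CI run. The artifact name and the env var
# holding the GitHub token are illustrative assumptions.
import os

token = os.environ.get("GITHUB_TOKEN")  # any token with read access to the repo's Actions
os.makedirs("previous_daily_ci", exist_ok=True)

reports = get_last_daily_ci_reports(
    artifact_names=["ci_results"],  # made-up artifact name
    output_dir="previous_daily_ci",
    token=token,
)

for artifact_name, files in reports.items():
    print(artifact_name, sorted(files))
```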
transformers/utils/get_previous_daily_ci.py/0
{ "file_path": "transformers/utils/get_previous_daily_ci.py", "repo_id": "transformers", "token_count": 1109 }
400
import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits}
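A hedged sketch of how this custom pipeline might be used directly; the checkpoint name below is only a placeholder, and any sequence-classification model whose config carries an `id2label` mapping would do. Registering the class with transformers' pipeline registry would be the more typical route, but direct instantiation keeps the sketch short.

```python
# Hypothetical usage sketch: instantiate the custom pipeline directly.
# "textattack/bert-base-uncased-MRPC" is a placeholder checkpoint; any
# sequence-classification model with an id2label mapping would work.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-MRPC")
tokenizer = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-MRPC")

pair_classifier = PairClassificationPipeline(model=model, tokenizer=tokenizer)
result = pair_classifier("I love this movie", second_text="This film is great")
print(result)  # {"label": ..., "score": ..., "logits": [...]}
```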
transformers/utils/test_module/custom_pipeline.py/0
{ "file_path": "transformers/utils/test_module/custom_pipeline.py", "repo_id": "transformers", "token_count": 453 }
401
from dataclasses import dataclass import tyro from huggingface_hub import HfApi @dataclass class Args: folder_path: str = "benchmark/trl" path_in_repo: str = "images/benchmark" repo_id: str = "trl-internal-testing/example-images" repo_type: str = "dataset" args = tyro.cli(Args) api = HfApi() api.upload_folder( folder_path=args.folder_path, path_in_repo=args.path_in_repo, repo_id=args.repo_id, repo_type=args.repo_type, )
trl/benchmark/upload_benchmark.py/0
{ "file_path": "trl/benchmark/upload_benchmark.py", "repo_id": "trl", "token_count": 200 }
402
# Iterative Trainer

Iterative fine-tuning is a training method that lets you perform custom actions (for example, generation and filtering) between optimization steps. In TRL we provide an easy-to-use API to fine-tune your models in an iterative way in just a few lines of code.

## Usage

To get started quickly, instantiate a model and a tokenizer, then build the trainer:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

from trl import IterativeSFTTrainer

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

trainer = IterativeSFTTrainer(
    model,
    tokenizer
)
```

You have the choice of providing either a list of strings or a list of tensors to the `step` function.

#### Using a list of tensors as input:

```python
inputs = {
    "input_ids": input_ids,
    "attention_mask": attention_mask
}

trainer.step(**inputs)
```

#### Using a list of strings as input:

```python
inputs = {
    "texts": texts
}

trainer.step(**inputs)
```

For causal language models, labels are automatically created from `input_ids` or from `texts`. When using sequence-to-sequence models, you will have to provide your own `labels` or `text_labels`.

## IterativeTrainer

[[autodoc]] IterativeSFTTrainer
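As a rough end-to-end sketch, the loop below reuses the `model`, `tokenizer`, and `trainer` created in the usage section above; the prompts, generation settings, and filtering rule are purely illustrative assumptions, not part of the trainer's API.

```python
# Illustrative sketch of an iterative loop: generate, filter, then take an optimization step.
# The prompts, the keep criterion, and the iteration count are assumptions for the example.
prompts = ["The movie was", "I think the food"]

for _ in range(3):  # a few illustrative iterations
    batch = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
    generations = model.generate(**batch, max_new_tokens=32, pad_token_id=tokenizer.pad_token_id)
    texts = tokenizer.batch_decode(generations, skip_special_tokens=True)

    # Custom filtering step: keep only the generations we like (here, the longer ones).
    kept = [text for text in texts if len(text.split()) > 8]
    if kept:
        trainer.step(texts=kept)
```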
trl/docs/source/iterative_sft_trainer.mdx/0
{ "file_path": "trl/docs/source/iterative_sft_trainer.mdx", "repo_id": "trl", "token_count": 389 }
403
# This is an example configuration file for the TRL CLI; you can use it for
# SFT like this: `trl sft --config config.yaml --output_dir test-sft`
# The YAML file supports environment variables by adding an `env` field
# as below:
# env:
#     CUDA_VISIBLE_DEVICES: 0
model_name_or_path: HuggingFaceM4/tiny-random-LlamaForCausalLM
dataset_name: imdb
dataset_text_field: text
report_to: none
learning_rate: 0.0001
lr_scheduler_type: cosine
trl/example_config.yaml/0
{ "file_path": "trl/example_config.yaml", "repo_id": "trl", "token_count": 168 }
404
# RLHF pipeline for the creation of StackLLaMa: a Stack Exchange llama-7b model.

There were three main steps to the training process:

1. Supervised fine-tuning of the base llama-7b model to create llama-7b-se:
    - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/supervised_finetuning.py --model_path=<LLAMA_MODEL_PATH> --streaming --learning_rate 1e-5 --max_steps 5000 --output_dir ./llama-se`
2. Reward modeling, using dialog pairs from the SE dataset and llama-7b-se, to create llama-7b-se-rm:
    - `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/reward_modeling.py --model_name=<LLAMA_SE_MODEL>`
3. RL fine-tuning of llama-7b-se with the llama-7b-se-rm reward model:
    - `accelerate launch --multi_gpu --num_machines 1 --num_processes 8 examples/research_projects/stack_llama/scripts/rl_training.py --log_with=wandb --model_name=<LLAMA_SE_MODEL> --reward_model_name=<LLAMA_SE_RM_MODEL> --adafactor=False --tokenizer_name=<LLAMA_TOKENIZER> --save_freq=100 --output_max_length=128 --batch_size=8 --gradient_accumulation_steps=8 --batched_gen=True --ppo_epochs=4 --seed=0 --learning_rate=1.4e-5 --early_stopping=True --output_dir=llama-se-rl-finetune-128-8-8-1.4e-5_adam`

LoRA layers were used at all stages to reduce memory requirements.
At each stage the peft adapter layers were merged with the base model, using:

```shell
python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --adapter_model_name=XXX --base_model_name=YYY --output_name=ZZZ
```

Note that this script requires `peft>=0.3.0`.

For access to the base llama-7b model, please see Meta's [release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) and [request form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform).
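For reference, the merge performed by `merge_peft_adapter.py` amounts to loading the LoRA adapter on top of the base model and folding the adapter weights back into it. The sketch below illustrates that core idea with placeholder paths; it is not the exact script.

```python
# Rough sketch of merging a LoRA adapter into its base model (paths are placeholders,
# not the actual script above).
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model = AutoModelForCausalLM.from_pretrained("path/to/llama-7b-base")  # placeholder
model = PeftModel.from_pretrained(base_model, "path/to/llama-se-adapter")  # placeholder
model = model.merge_and_unload()  # fold the LoRA weights into the base weights

model.save_pretrained("llama-7b-se-merged")
AutoTokenizer.from_pretrained("path/to/llama-7b-base").save_pretrained("llama-7b-se-merged")
```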
trl/examples/research_projects/stack_llama/scripts/README.md/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/README.md", "repo_id": "trl", "token_count": 696 }
405
examples: llama: text: There is a Llama in my lawn, how can I get rid of it? code: text: Write a Python function that integrates any Python function f(x) numerically over an arbitrary interval [x_start, x_end]. helicopter: text: How many helicopters can a human eat in one sitting? numbers: text: Count to 10 but skip every number ending with an 'e' birds: text: Why aren't birds real? socks: text: Why is it important to eat socks after meditating?
trl/examples/scripts/config/default_chat_config.yaml/0
{ "file_path": "trl/examples/scripts/config/default_chat_config.yaml", "repo_id": "trl", "token_count": 152 }
406
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from functools import partial from unittest.mock import patch import pytest import torch from transformers import AutoModelForCausalLM, AutoTokenizer from .testing_utils import is_peft_available, require_peft class DummyDataset(torch.utils.data.Dataset): def __init__(self, query_data, response_data): self.query_data = query_data self.response_data = response_data def __len__(self): return len(self.query_data) def __getitem__(self, idx): return self.query_data[idx], self.response_data[idx] EXPECTED_STATS = [ "objective/kl", "objective/kl_dist", "objective/logprobs", "objective/ref_logprobs", "objective/kl_coef", "objective/entropy", "ppo/mean_non_score_reward", "ppo/loss/policy", "ppo/loss/value", "ppo/loss/total", "ppo/policy/entropy", "ppo/policy/approxkl", "ppo/policy/policykl", "ppo/policy/clipfrac", "ppo/policy/advantages", "ppo/policy/advantages_mean", "ppo/policy/ratio", "ppo/returns/mean", "ppo/returns/var", "ppo/val/vpred", "ppo/val/error", "ppo/val/clipfrac", "ppo/val/mean", "ppo/val/var", "ppo/val/var_explained", "time/ppo/forward_pass", "time/ppo/compute_rewards", "time/ppo/optimize_step", "time/ppo/calc_stats", "time/ppo/total", "ppo/learning_rate", ] @require_peft class TestPeftDependancy(unittest.TestCase): def setUp(self): self.causal_lm_model_id = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM" self.seq_to_seq_model_id = "trl-internal-testing/tiny-random-T5ForConditionalGeneration" if is_peft_available(): from peft import LoraConfig, get_peft_model lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) self.peft_model = get_peft_model(causal_lm_model, lora_config) def test_no_peft(self): with patch.dict(sys.modules, {"peft": None}): from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead # Check that loading a model with `peft` will raise an error with pytest.raises(ModuleNotFoundError): import peft # noqa: F401 _trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id) _trl_seq2seq_model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(self.seq_to_seq_model_id) def test_imports_no_peft(self): with patch.dict(sys.modules, {"peft": None}): from trl import ( # noqa: F401 AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, PreTrainedModelWrapper, ) def test_ppo_trainer_no_peft(self): with patch.dict(sys.modules, {"peft": None}): from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer ppo_model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(ppo_model_id) tokenizer = AutoTokenizer.from_pretrained(ppo_model_id) tokenizer.pad_token_id = tokenizer.eos_token_id ppo_config = PPOConfig(batch_size=2, mini_batch_size=1, log_with=None) dummy_dataset = 
DummyDataset( [torch.LongTensor([0, 1, 0, 1, 0, 1]), torch.LongTensor([0, 1, 0, 1, 0, 1])], [torch.LongTensor([1, 0, 1, 0, 1, 0]), torch.LongTensor([0, 1, 0, 1, 0, 1])], ) ppo_trainer = PPOTrainer( config=ppo_config, model=trl_model, ref_model=None, tokenizer=tokenizer, dataset=dummy_dataset, ) ppo_trainer.optimizer.zero_grad = partial(ppo_trainer.optimizer.zero_grad, set_to_none=False) dummy_dataloader = ppo_trainer.dataloader for query_tensor, response_tensor in dummy_dataloader: # define a reward for response # (this could be any reward such as human feedback or output from another model) reward = [torch.tensor(1.0), torch.tensor(0.0)] # train model train_stats = ppo_trainer.step(list(query_tensor), list(response_tensor), reward) break # check gradients are not None for _, param in trl_model.named_parameters(): if param.requires_grad: assert param.grad is not None # check expected stats for stat in EXPECTED_STATS: assert stat in train_stats
trl/tests/test_no_peft.py/0
{ "file_path": "trl/tests/test_no_peft.py", "repo_id": "trl", "token_count": 2576 }
407
import logging
from typing import Callable, Literal, Optional, Union

from datasets import Dataset, Value
from transformers import AutoTokenizer

from ..trainer.utils import ConstantLengthDataset


FORMAT_MAPPING = {
    "chatml": [{"content": Value(dtype="string", id=None), "role": Value(dtype="string", id=None)}],
    "instruction": {"completion": Value(dtype="string", id=None), "prompt": Value(dtype="string", id=None)},
}


def conversations_formatting_function(tokenizer: AutoTokenizer, messages_field: Literal["messages", "conversations"]):
    r"""
    Return a callable that formats a conversational ("chatml") dataset by applying the tokenizer's chat template
    to each conversation stored in `messages_field`.
    """

    def format_dataset(examples):
        if isinstance(examples[messages_field][0], list):
            output_texts = []
            for i in range(len(examples[messages_field])):
                output_texts.append(tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False))
            return output_texts
        else:
            return tokenizer.apply_chat_template(examples[messages_field], tokenize=False)

    return format_dataset


def instructions_formatting_function(tokenizer: AutoTokenizer):
    r"""
    Return a callable that converts a prompt/completion ("instruction") dataset into user/assistant messages and
    applies the tokenizer's chat template to them.
    """

    def format_dataset(examples):
        if isinstance(examples["prompt"], list):
            output_texts = []
            for i in range(len(examples["prompt"])):
                converted_sample = [
                    {"role": "user", "content": examples["prompt"][i]},
                    {"role": "assistant", "content": examples["completion"][i]},
                ]
                output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False))
            return output_texts
        else:
            converted_sample = [
                {"role": "user", "content": examples["prompt"]},
                {"role": "assistant", "content": examples["completion"]},
            ]
            return tokenizer.apply_chat_template(converted_sample, tokenize=False)

    return format_dataset


def get_formatting_func_from_dataset(
    dataset: Union[Dataset, ConstantLengthDataset], tokenizer: AutoTokenizer
) -> Optional[Callable]:
    r"""
    Finds the correct formatting function based on the dataset structure. Currently supported datasets are:
    - `ChatML` with [{"role": str, "content": str}]
    - `instruction` with [{"prompt": str, "completion": str}]

    Args:
        dataset (Dataset): User dataset
        tokenizer (AutoTokenizer): Tokenizer used for formatting

    Returns:
        Callable: Formatting function if the dataset format is supported, else None
    """
    if isinstance(dataset, Dataset):
        if "messages" in dataset.features:
            if dataset.features["messages"] == FORMAT_MAPPING["chatml"]:
                logging.info("Formatting dataset with chatml format")
                return conversations_formatting_function(tokenizer, "messages")
        if "conversations" in dataset.features:
            if dataset.features["conversations"] == FORMAT_MAPPING["chatml"]:
                logging.info("Formatting dataset with chatml format")
                return conversations_formatting_function(tokenizer, "conversations")
        elif dataset.features == FORMAT_MAPPING["instruction"]:
            logging.info("Formatting dataset with instruction format")
            return instructions_formatting_function(tokenizer)

    return None
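A hedged usage sketch of `get_formatting_func_from_dataset` on a tiny prompt/completion dataset; the tokenizer checkpoint is a placeholder and only needs to ship a chat template.

```python
# Hypothetical usage sketch: detect and apply the right formatting function.
# The checkpoint name is a placeholder; any tokenizer with a chat template works.
from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")  # placeholder
dataset = Dataset.from_dict(
    {"prompt": ["What is 2 + 2?"], "completion": ["2 + 2 equals 4."]}
)

formatting_func = get_formatting_func_from_dataset(dataset, tokenizer)
if formatting_func is not None:
    print(formatting_func(dataset[0]))  # the pair rendered with the tokenizer's chat template
```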
trl/trl/extras/dataset_formatting.py/0
{ "file_path": "trl/trl/extras/dataset_formatting.py", "repo_id": "trl", "token_count": 1400 }
408